From d195173ba2c06b56c1bf5554ebf0736795798c91 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Thu, 6 Jul 2023 02:52:25 -0400
Subject: llvm: start tracking more things without relying on the llvm api

---
 src/Compilation.zig           |   6 +-
 src/codegen/llvm.zig          | 765 ++++++++++++++++++++++----------------
 src/codegen/llvm/Builder.zig  | 845 ++++++++++++++++++++++++++++++++++++++++++
 src/codegen/llvm/bindings.zig |  30 +-
 src/link.zig                  |   1 +
 src/main.zig                  |   8 +
 6 files changed, 1324 insertions(+), 331 deletions(-)
 create mode 100644 src/codegen/llvm/Builder.zig
(limited to 'src')

diff --git a/src/Compilation.zig b/src/Compilation.zig
index eb4b67933d..3a95f4831a 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -538,6 +538,7 @@ pub const InitOptions = struct {
     want_lto: ?bool = null,
     want_unwind_tables: ?bool = null,
     use_llvm: ?bool = null,
+    use_lib_llvm: ?bool = null,
     use_lld: ?bool = null,
     use_clang: ?bool = null,
     single_threaded: ?bool = null,
@@ -753,7 +754,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
     const root_name = try arena.dupeZ(u8, options.root_name);
 
     // Make a decision on whether to use LLVM or our own backend.
-    const use_llvm = build_options.have_llvm and blk: {
+    const use_lib_llvm = options.use_lib_llvm orelse build_options.have_llvm;
+    const use_llvm = blk: {
         if (options.use_llvm) |explicit| break :blk explicit;
 
@@ -1161,6 +1163,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
     hash.add(valgrind);
     hash.add(single_threaded);
     hash.add(use_llvm);
+    hash.add(use_lib_llvm);
     hash.add(dll_export_fns);
     hash.add(options.is_test);
     hash.add(options.test_evented_io);
@@ -1444,6 +1447,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
         .optimize_mode = options.optimize_mode,
         .use_lld = use_lld,
         .use_llvm = use_llvm,
+        .use_lib_llvm = use_lib_llvm,
         .link_libc = link_libc,
         .link_libcpp = link_libcpp,
         .link_libunwind = link_libunwind,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 4960414499..0ac4ae7b33 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -7,6 +7,7 @@ const math = std.math;
 const native_endian = builtin.cpu.arch.endian();
 const DW = std.dwarf;
 
+const Builder = @import("llvm/Builder.zig");
 const llvm = @import("llvm/bindings.zig");
 const link = @import("../link.zig");
 const Compilation = @import("../Compilation.zig");
@@ -338,6 +339,8 @@ fn deleteLlvmGlobal(llvm_global: *llvm.Value) void {
 
 pub const Object = struct {
     gpa: Allocator,
+    builder: Builder,
+
     module: *Module,
     llvm_module: *llvm.Module,
     di_builder: ?*llvm.DIBuilder,
@@ -359,7 +362,7 @@ pub const Object = struct {
     /// version of the name and incorrectly get function not found in the llvm module.
     /// * it works for functions not all globals.
     /// Therefore, this table keeps track of the mapping.
-    decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
+    decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Global.Index),
    /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
     named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
     /// Maps Zig types to LLVM types.
The table memory is backed by the GPA of @@ -394,13 +397,19 @@ pub const Object = struct { } pub fn init(gpa: Allocator, options: link.Options) !Object { - const context = llvm.Context.create(); - errdefer context.dispose(); + var builder = Builder{ + .gpa = gpa, + .use_lib_llvm = options.use_lib_llvm, + + .llvm_context = llvm.Context.create(), + .llvm_module = undefined, + }; + errdefer builder.llvm_context.dispose(); initializeLLVMTarget(options.target.cpu.arch); - const llvm_module = llvm.Module.createWithName(options.root_name.ptr, context); - errdefer llvm_module.dispose(); + builder.llvm_module = llvm.Module.createWithName(options.root_name.ptr, builder.llvm_context); + errdefer builder.llvm_module.dispose(); const llvm_target_triple = try targetTriple(gpa, options.target); defer gpa.free(llvm_target_triple); @@ -414,7 +423,7 @@ pub const Object = struct { return error.InvalidLlvmTriple; } - llvm_module.setTarget(llvm_target_triple.ptr); + builder.llvm_module.setTarget(llvm_target_triple.ptr); var opt_di_builder: ?*llvm.DIBuilder = null; errdefer if (opt_di_builder) |di_builder| di_builder.dispose(); @@ -422,10 +431,10 @@ pub const Object = struct { if (!options.strip) { switch (options.target.ofmt) { - .coff => llvm_module.addModuleCodeViewFlag(), - else => llvm_module.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), + .coff => builder.llvm_module.addModuleCodeViewFlag(), + else => builder.llvm_module.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), } - const di_builder = llvm_module.createDIBuilder(true); + const di_builder = builder.llvm_module.createDIBuilder(true); opt_di_builder = di_builder; // Don't use the version string here; LLVM misparses it when it @@ -508,24 +517,35 @@ pub const Object = struct { const target_data = target_machine.createTargetDataLayout(); errdefer target_data.dispose(); - llvm_module.setModuleDataLayout(target_data); + builder.llvm_module.setModuleDataLayout(target_data); - if (options.pic) llvm_module.setModulePICLevel(); - if (options.pie) llvm_module.setModulePIELevel(); - if (code_model != .Default) llvm_module.setModuleCodeModel(code_model); + if (options.pic) builder.llvm_module.setModulePICLevel(); + if (options.pie) builder.llvm_module.setModulePIELevel(); + if (code_model != .Default) builder.llvm_module.setModuleCodeModel(code_model); if (options.opt_bisect_limit >= 0) { - context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit)); + builder.llvm_context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit)); } + try builder.init(); + errdefer builder.deinit(); + builder.source_filename = try builder.string(options.root_name); + builder.data_layout = rep: { + const rep = target_data.stringRep(); + defer llvm.disposeMessage(rep); + break :rep try builder.string(std.mem.span(rep)); + }; + builder.target_triple = try builder.string(llvm_target_triple); + return Object{ .gpa = gpa, + .builder = builder, .module = options.module.?, - .llvm_module = llvm_module, + .llvm_module = builder.llvm_module, .di_map = .{}, .di_builder = opt_di_builder, .di_compile_unit = di_compile_unit, - .context = context, + .context = builder.llvm_context, .target_machine = target_machine, .target_data = target_data, .target = options.target, @@ -553,6 +573,7 @@ pub const Object = struct { self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); self.extern_collisions.deinit(gpa); + self.builder.deinit(); self.* = undefined; } @@ -671,34 +692,36 @@ pub const Object = struct { // This 
map has externs with incorrect symbol names. for (object.extern_collisions.keys()) |decl_index| { - const entry = object.decl_map.getEntry(decl_index) orelse continue; - const llvm_global = entry.value_ptr.*; + const global = object.decl_map.get(decl_index) orelse continue; + const llvm_global = global.toLlvm(&object.builder); // Same logic as below but for externs instead of exports. - const decl = mod.declPtr(decl_index); - const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue; - if (other_global == llvm_global) continue; + const decl_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue; + const other_global = object.builder.getGlobal(decl_name) orelse continue; + const other_llvm_global = other_global.toLlvm(&object.builder); + if (other_llvm_global == llvm_global) continue; - llvm_global.replaceAllUsesWith(other_global); + llvm_global.replaceAllUsesWith(other_llvm_global); deleteLlvmGlobal(llvm_global); - entry.value_ptr.* = other_global; + object.builder.llvm_globals.items[@intFromEnum(global)] = other_llvm_global; } object.extern_collisions.clearRetainingCapacity(); - const export_keys = mod.decl_exports.keys(); - for (mod.decl_exports.values(), 0..) |export_list, i| { - const decl_index = export_keys[i]; - const llvm_global = object.decl_map.get(decl_index) orelse continue; + for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { + const global = object.decl_map.get(decl_index) orelse continue; + const llvm_global = global.toLlvm(&object.builder); for (export_list.items) |exp| { // Detect if the LLVM global has already been created as an extern. In such // case, we need to replace all uses of it with this exported global. - const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); + const exp_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue; - const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue; - if (other_global == llvm_global) continue; + const other_global = object.builder.getGlobal(exp_name) orelse continue; + const other_llvm_global = other_global.toLlvm(&object.builder); + if (other_llvm_global == llvm_global) continue; - other_global.replaceAllUsesWith(llvm_global); - llvm_global.takeName(other_global); - deleteLlvmGlobal(other_global); + other_llvm_global.replaceAllUsesWith(llvm_global); + try global.takeName(&object.builder, other_global); + deleteLlvmGlobal(other_llvm_global); + object.builder.llvm_globals.items[@intFromEnum(other_global)] = llvm_global; // Problem: now we need to replace in the decl_map that // the extern decl index points to this new global. However we don't // know the decl index. @@ -813,6 +836,12 @@ pub const Object = struct { emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg, }); + { + const writer = std.io.getStdErr().writer(); + try writer.writeAll("\n" ++ "-" ** 200 ++ "\n\n"); + try self.builder.dump(writer); + } + // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. // So we call the entire pipeline multiple times if this is requested. 
var error_message: [*:0]const u8 = undefined; @@ -884,7 +913,9 @@ pub const Object = struct { .err_msg = null, }; - const llvm_func = try o.resolveLlvmFunction(decl_index); + const function_index = try o.resolveLlvmFunction(decl_index); + const function = function_index.ptr(&o.builder); + const llvm_func = function.global.toLlvm(&o.builder); if (func.analysis(ip).is_noinline) { o.addFnAttr(llvm_func, "noinline"); @@ -932,6 +963,7 @@ pub const Object = struct { const builder = o.context.createBuilder(); + function.body = {}; const entry_block = o.context.appendBasicBlock(llvm_func, "Entry"); builder.positionBuilderAtEnd(entry_block); @@ -988,7 +1020,7 @@ pub const Object = struct { }, .byref => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1007,7 +1039,7 @@ pub const Object = struct { }, .byref_mut => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1030,7 +1062,7 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = o.context.intType(abi_size * 8); const alignment = @max( @@ -1075,7 +1107,7 @@ pub const Object = struct { const len_param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const slice_llvm_ty = try o.lowerType(param_ty); + const slice_llvm_ty = try o.lowerLlvmType(param_ty); const partial = builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr_param, 0, ""); const aggregate = builder.buildInsertValue(partial, len_param, 1, ""); try args.append(aggregate); @@ -1084,7 +1116,7 @@ pub const Object = struct { assert(!it.byval_attr); const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); @@ -1115,7 +1147,7 @@ pub const Object = struct { }, .float_array => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1133,7 +1165,7 @@ pub const Object = struct { }, .i32_array, .i64_array => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1243,14 +1275,6 @@ pub const Object = struct { try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); } - /// TODO replace this with a call to `Module::getNamedValue`. 
This will require adding - /// a new wrapper in zig_llvm.h/zig_llvm.cpp. - fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*llvm.Value { - if (o.llvm_module.getNamedFunction(name)) |x| return x; - if (o.llvm_module.getNamedGlobal(name)) |x| return x; - return null; - } - pub fn updateDeclExports( self: *Object, mod: *Module, @@ -1260,45 +1284,49 @@ pub const Object = struct { const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. - const llvm_global = self.decl_map.get(decl_index) orelse return; + const global_index = self.decl_map.get(decl_index) orelse return; + const llvm_global = global_index.toLlvm(&self.builder); const decl = mod.declPtr(decl_index); if (decl.isExtern(mod)) { - var free_decl_name = false; const decl_name = decl_name: { const decl_name = mod.intern_pool.stringToSlice(decl.name); if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { - free_decl_name = true; - break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ - decl_name, lib_name, - }); + break :decl_name try self.builder.fmt("{s}|{s}", .{ decl_name, lib_name }); } } } - break :decl_name decl_name; + break :decl_name try self.builder.string(decl_name); }; - defer if (free_decl_name) gpa.free(decl_name); - llvm_global.setValueName(decl_name); - if (self.getLlvmGlobal(decl_name)) |other_global| { - if (other_global != llvm_global) { + if (self.builder.getGlobal(decl_name)) |other_global| { + if (other_global.toLlvm(&self.builder) != llvm_global) { try self.extern_collisions.put(gpa, decl_index, {}); } } + + try global_index.rename(&self.builder, decl_name); + const decl_name_slice = decl_name.toSlice(&self.builder).?; + const global = global_index.ptr(&self.builder); + global.unnamed_addr = .none; llvm_global.setUnnamedAddr(.False); + global.linkage = .external; llvm_global.setLinkage(.External); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) { + global.dll_storage_class = .default; + llvm_global.setDLLStorageClass(.Default); + } if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); + const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); + const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); di_global.replaceLinkageName(linkage_name); } } @@ -1313,18 +1341,19 @@ pub const Object = struct { } } } else if (exports.len != 0) { - const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name); - llvm_global.setValueName2(exp_name.ptr, exp_name.len); + const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name)); + try global_index.rename(&self.builder, exp_name); llvm_global.setUnnamedAddr(.False); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); if (self.di_map.get(decl)) |di_node| { + const exp_name_slice = exp_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func = 
@as(*llvm.DISubprogram, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); + const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); + const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); di_global.replaceLinkageName(linkage_name); } } @@ -1369,8 +1398,8 @@ pub const Object = struct { } } } else { - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); - llvm_global.setValueName2(fqn.ptr, fqn.len); + const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod))); + try global_index.rename(&self.builder, fqn); llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); @@ -1386,8 +1415,8 @@ pub const Object = struct { } pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void { - const llvm_value = self.decl_map.get(decl_index) orelse return; - llvm_value.deleteGlobal(); + const global = self.decl_map.get(decl_index) orelse return; + global.toLlvm(&self.builder).deleteGlobal(); } fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile { @@ -2459,27 +2488,34 @@ pub const Object = struct { /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. - fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) !*llvm.Value { + fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) !Builder.Function.Index { const mod = o.module; const gpa = o.gpa; const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try o.decl_map.getOrPut(gpa, decl_index); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function; assert(decl.has_tv); const fn_info = mod.typeToFunc(zig_fn_type).?; const target = mod.getTarget(); const sret = firstParamSRet(fn_info, mod); - const fn_type = try o.lowerType(zig_fn_type); + const fn_type = try o.lowerLlvmType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(mod); const ip = &mod.intern_pool; + const fqn = try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod))); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = o.llvm_module.addFunctionInAddressSpace(ip.stringToSlice(fqn), fn_type, llvm_addrspace); - gop.value_ptr.* = llvm_fn; + const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.toSlice(&o.builder).?, fn_type, llvm_addrspace); + + var global = Builder.Global{ + .type = .void, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; const is_extern = decl.isExtern(mod); if (!is_extern) { @@ -2500,7 +2536,7 @@ pub const Object = struct { o.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 o.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try o.lowerType(fn_info.return_type.toType()); + const raw_llvm_ret_ty = try o.lowerLlvmType(fn_info.return_type.toType()); 
llvm_fn.addSretAttr(raw_llvm_ret_ty); } @@ -2554,7 +2590,7 @@ pub const Object = struct { }, .byref => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1]; - const param_llvm_ty = try o.lowerType(param_ty.toType()); + const param_llvm_ty = try o.lowerLlvmType(param_ty.toType()); const alignment = param_ty.toType().abiAlignment(mod); o.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, @@ -2576,7 +2612,10 @@ pub const Object = struct { }; } - return llvm_fn; + try o.builder.llvm_globals.append(o.gpa, llvm_fn); + gop.value_ptr.* = try o.builder.addGlobal(fqn, global); + try o.builder.functions.append(o.gpa, function); + return global.kind.function; } fn addCommonFnAttributes(o: *Object, llvm_fn: *llvm.Value) void { @@ -2622,60 +2661,89 @@ pub const Object = struct { } } - fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Error!*llvm.Value { + fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Error!Builder.Object.Index { const gop = try o.decl_map.getOrPut(o.gpa, decl_index); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.object; errdefer assert(o.decl_map.remove(decl_index)); const mod = o.module; const decl = mod.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(mod); + const fqn = try o.builder.string(mod.intern_pool.stringToSlice( + try decl.getFullyQualifiedName(mod), + )); const target = mod.getTarget(); - const llvm_type = try o.lowerType(decl.ty); + const llvm_type = try o.lowerLlvmType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); + var global = Builder.Global{ + .type = .void, + .kind = .{ .object = @enumFromInt(o.builder.objects.items.len) }, + }; + var object = Builder.Object{ + .global = @enumFromInt(o.builder.globals.count()), + }; + + const is_extern = decl.isExtern(mod); + const name = if (is_extern) + try o.builder.string(mod.intern_pool.stringToSlice(decl.name)) + else + fqn; const llvm_global = o.llvm_module.addGlobalInAddressSpace( llvm_type, - mod.intern_pool.stringToSlice(fqn), + fqn.toSlice(&o.builder).?, llvm_actual_addrspace, ); - gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. 
- if (decl.isExtern(mod)) { - llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name)); + if (is_extern) { + global.unnamed_addr = .none; llvm_global.setUnnamedAddr(.False); + global.linkage = .external; llvm_global.setLinkage(.External); if (decl.val.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.is_threadlocal and !single_threaded) { + object.thread_local = .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + object.thread_local = .none; llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (variable.is_weak_linkage) { + global.linkage = .extern_weak; + llvm_global.setLinkage(.ExternalWeak); + } } } else { + global.linkage = .internal; llvm_global.setLinkage(.Internal); + global.unnamed_addr = .unnamed_addr; llvm_global.setUnnamedAddr(.True); } - return llvm_global; + try o.builder.llvm_globals.append(o.gpa, llvm_global); + gop.value_ptr.* = try o.builder.addGlobal(name, global); + try o.builder.objects.append(o.gpa, object); + return global.kind.object; } fn isUnnamedType(o: *Object, ty: Type, val: *llvm.Value) bool { - // Once `lowerType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `lowerType` fails here it means + // Once `lowerLlvmType` succeeds, successive calls to it with the same Zig type + // are guaranteed to succeed. So if a call to `lowerLlvmType` fails here it means // it is the first time lowering the type, which means the value can't possible // have that type. - const llvm_ty = o.lowerType(ty) catch return true; + const llvm_ty = o.lowerLlvmType(ty) catch return true; return val.typeOf() != llvm_ty; } - fn lowerType(o: *Object, t: Type) Allocator.Error!*llvm.Type { - const llvm_ty = try lowerTypeInner(o, t); + fn lowerLlvmType(o: *Object, t: Type) Allocator.Error!*llvm.Type { + const ty = try o.lowerType(t); + const llvm_ty = if (ty != .none) + o.builder.llvm_types.items[@intFromEnum(ty)] + else + try o.lowerLlvmTypeInner(t); const mod = o.module; if (std.debug.runtime_safety and false) check: { if (t.zigTypeTag(mod) == .Opaque) break :check; @@ -2693,7 +2761,7 @@ pub const Object = struct { return llvm_ty; } - fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!*llvm.Type { + fn lowerLlvmTypeInner(o: *Object, t: Type) Allocator.Error!*llvm.Type { const gpa = o.gpa; const mod = o.module; const target = mod.getTarget(); @@ -2714,7 +2782,7 @@ pub const Object = struct { 16 => return if (backendSupportsF16(target)) o.context.halfType() else o.context.intType(16), 32 => return o.context.floatType(), 64 => return o.context.doubleType(), - 80 => return if (backendSupportsF80(target)) o.context.x86FP80Type() else o.context.intType(80), + 80 => return if (backendSupportsF80(target)) o.context.x86_fp80Type() else o.context.intType(80), 128 => return o.context.fp128Type(), else => unreachable, }, @@ -2724,8 +2792,8 @@ pub const Object = struct { const ptr_type = t.slicePtrFieldType(mod); const fields: [2]*llvm.Type = .{ - try o.lowerType(ptr_type), - try o.lowerType(Type.usize), + try o.lowerLlvmType(ptr_type), + try o.lowerLlvmType(Type.usize), }; return o.context.structType(&fields, fields.len, .False); } @@ -2749,12 +2817,12 @@ pub const Object = struct { .Array => { const elem_ty = t.childType(mod); if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); - const elem_llvm_ty = try o.lowerType(elem_ty); + const 
elem_llvm_ty = try o.lowerLlvmType(elem_ty); const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len))); }, .Vector => { - const elem_type = try o.lowerType(t.childType(mod)); + const elem_type = try o.lowerLlvmType(t.childType(mod)); return elem_type.vectorType(t.vectorLen(mod)); }, .Optional => { @@ -2762,7 +2830,7 @@ pub const Object = struct { if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return o.context.intType(8); } - const payload_llvm_ty = try o.lowerType(child_ty); + const payload_llvm_ty = try o.lowerLlvmType(child_ty); if (t.optionalReprIsPayload(mod)) { return payload_llvm_ty; } @@ -2783,10 +2851,10 @@ pub const Object = struct { .ErrorUnion => { const payload_ty = t.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return try o.lowerType(Type.anyerror); + return try o.lowerLlvmType(Type.anyerror); } - const llvm_error_type = try o.lowerType(Type.anyerror); - const llvm_payload_type = try o.lowerType(payload_ty); + const llvm_error_type = try o.lowerLlvmType(Type.anyerror); + const llvm_payload_type = try o.lowerLlvmType(payload_ty); const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); @@ -2855,7 +2923,7 @@ pub const Object = struct { const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try o.lowerType(field_ty.toType()); + const field_llvm_ty = try o.lowerLlvmType(field_ty.toType()); try llvm_field_types.append(gpa, field_llvm_ty); offset += field_ty.toType().abiSize(mod); @@ -2886,14 +2954,17 @@ pub const Object = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); - const int_llvm_ty = try o.lowerType(struct_obj.backing_int_ty); + const int_llvm_ty = try o.lowerLlvmType(struct_obj.backing_int_ty); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; } - const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try struct_obj.getFullyQualifiedName(mod), + )); + _ = try o.builder.opaqueType(name); - const llvm_struct_ty = o.context.structCreateNamed(name); + const llvm_struct_ty = o.context.structCreateNamed(name.toSlice(&o.builder).?); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls assert(struct_obj.haveFieldTypes()); @@ -2924,7 +2995,7 @@ pub const Object = struct { const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try o.lowerType(field.ty); + const field_llvm_ty = try o.lowerLlvmType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); offset += field.ty.abiSize(mod); @@ -2962,7 +3033,7 @@ pub const Object = struct { } if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty); + const enum_tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); gop.value_ptr.* = enum_tag_llvm_ty; return enum_tag_llvm_ty; } @@ -2973,7 +3044,7 @@ pub const Object = struct { gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try o.lowerType(aligned_field.ty); + const llvm_aligned_field_ty = try o.lowerLlvmType(aligned_field.ty); const llvm_payload_ty = t: { if 
(layout.most_aligned_field_size == layout.payload_size) { @@ -2995,7 +3066,7 @@ pub const Object = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); return llvm_union_ty; } - const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty); + const enum_tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); // Put the tag before or after the payload depending on which one's // alignment is greater. @@ -3017,7 +3088,7 @@ pub const Object = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); return llvm_union_ty; }, - .Fn => return lowerTypeFn(o, t), + .Fn => return lowerLlvmTypeFn(o, t), .ComptimeInt => unreachable, .ComptimeFloat => unreachable, .Type => unreachable, @@ -3030,7 +3101,17 @@ pub const Object = struct { } } - fn lowerTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { + fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { + const mod = o.module; + switch (t.toIntern()) { + .void_type, .noreturn_type => return .void, + else => switch (mod.intern_pool.indexToKey(t.toIntern())) { + else => return .none, + }, + } + } + + fn lowerLlvmTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { const mod = o.module; const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; @@ -3047,7 +3128,7 @@ pub const Object = struct { mod.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); - try llvm_params.append(try o.lowerType(ptr_ty)); + try llvm_params.append(try o.lowerLlvmType(ptr_ty)); } var it = iterateParamTypes(o, fn_info); @@ -3055,7 +3136,7 @@ pub const Object = struct { .no_bits => continue, .byval => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - try llvm_params.append(try o.lowerType(param_ty)); + try llvm_params.append(try o.lowerLlvmType(param_ty)); }, .byref, .byref_mut => { try llvm_params.append(o.context.pointerType(0)); @@ -3071,8 +3152,8 @@ pub const Object = struct { param_ty.optionalChild(mod).slicePtrFieldType(mod) else param_ty.slicePtrFieldType(mod); - const ptr_llvm_ty = try o.lowerType(ptr_ty); - const len_llvm_ty = try o.lowerType(Type.usize); + const ptr_llvm_ty = try o.lowerLlvmType(ptr_ty); + const len_llvm_ty = try o.lowerLlvmType(Type.usize); try llvm_params.ensureUnusedCapacity(2); llvm_params.appendAssumeCapacity(ptr_llvm_ty); @@ -3086,7 +3167,7 @@ pub const Object = struct { }, .float_array => |count| { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); + const float_ty = try o.lowerLlvmType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @as(c_uint, @intCast(count)); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); @@ -3106,7 +3187,7 @@ pub const Object = struct { ); } - /// Use this instead of lowerType when you want to handle correctly the case of elem_ty + /// Use this instead of lowerLlvmType when you want to handle correctly the case of elem_ty /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. 
fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!*llvm.Type { @@ -3118,7 +3199,7 @@ pub const Object = struct { else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) - try o.lowerType(elem_ty) + try o.lowerLlvmType(elem_ty) else o.context.intType(8); @@ -3135,7 +3216,7 @@ pub const Object = struct { else => {}, } if (tv.val.isUndefDeep(mod)) { - const llvm_type = try o.lowerType(tv.ty); + const llvm_type = try o.lowerLlvmType(tv.ty); return llvm_type.getUndef(); } @@ -3168,7 +3249,7 @@ pub const Object = struct { .generic_poison, => unreachable, // non-runtime values .false, .true => { - const llvm_type = try o.lowerType(tv.ty); + const llvm_type = try o.lowerLlvmType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, }, @@ -3180,13 +3261,15 @@ pub const Object = struct { const fn_decl_index = extern_func.decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); - return o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(fn_decl_index); + return function_index.toLlvm(&o.builder); }, .func => |func| { const fn_decl_index = func.owner_decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); - return o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(fn_decl_index); + return function_index.toLlvm(&o.builder); }, .int => { var bigint_space: Value.BigIntSpace = undefined; @@ -3194,7 +3277,7 @@ pub const Object = struct { return lowerBigInt(o, tv.ty, bigint); }, .err => |err| { - const llvm_ty = try o.lowerType(Type.anyerror); + const llvm_ty = try o.lowerLlvmType(Type.anyerror); const int = try mod.getErrorValue(err.name); return llvm_ty.constInt(int, .False); }, @@ -3230,7 +3313,7 @@ pub const Object = struct { }); var fields_buf: [3]*llvm.Value = undefined; - const llvm_ty = try o.lowerType(tv.ty); + const llvm_ty = try o.lowerLlvmType(tv.ty); const llvm_field_count = llvm_ty.countStructElementTypes(); if (llvm_field_count > 2) { assert(llvm_field_count == 3); @@ -3274,7 +3357,7 @@ pub const Object = struct { return unsigned_val; }, .float => { - const llvm_ty = try o.lowerType(tv.ty); + const llvm_ty = try o.lowerLlvmType(tv.ty); switch (tv.ty.floatBits(target)) { 16 => { const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); @@ -3359,7 +3442,7 @@ pub const Object = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } - const llvm_ty = try o.lowerType(tv.ty); + const llvm_ty = try o.lowerLlvmType(tv.ty); if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { .none => llvm_ty.constNull(), else => |payload| o.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), @@ -3405,7 +3488,7 @@ pub const Object = struct { .True, ); } else { - const llvm_elem_ty = try o.lowerType(elem_ty); + const llvm_elem_ty = try o.lowerLlvmType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @as(c_uint, @intCast(llvm_elems.len)), @@ -3440,7 +3523,7 @@ pub const Object = struct { .True, ); } else { - const llvm_elem_ty = try o.lowerType(elem_ty); + const llvm_elem_ty = try o.lowerLlvmType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @as(c_uint, @intCast(llvm_elems.len)), @@ -3527,7 +3610,7 @@ pub const Object = struct { .False, ); } else { - const llvm_struct_ty = try o.lowerType(tv.ty); + const llvm_struct_ty = try o.lowerLlvmType(tv.ty); return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, @as(c_uint, 
@intCast(llvm_fields.items.len)), @@ -3536,7 +3619,7 @@ pub const Object = struct { }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const llvm_struct_ty = try o.lowerType(tv.ty); + const llvm_struct_ty = try o.lowerLlvmType(tv.ty); if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3633,7 +3716,7 @@ pub const Object = struct { else => unreachable, }, .un => { - const llvm_union_ty = try o.lowerType(tv.ty); + const llvm_union_ty = try o.lowerLlvmType(tv.ty); const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { .none => tv.val.castTag(.@"union").?.data, else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { @@ -3803,7 +3886,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(payload_offset, .False), }; - const eu_llvm_ty = try o.lowerType(eu_ty); + const eu_llvm_ty = try o.lowerLlvmType(eu_ty); return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .opt_payload => |opt_ptr| { @@ -3824,19 +3907,19 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(0, .False), }; - const opt_llvm_ty = try o.lowerType(opt_ty); + const opt_llvm_ty = try o.lowerLlvmType(opt_ty); return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .comptime_field => unreachable, .elem => |elem_ptr| { const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .field => |field_ptr| { @@ -3865,7 +3948,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_pl_index, .False), }; - const parent_llvm_ty = try o.lowerType(parent_ty); + const parent_llvm_ty = try o.lowerLlvmType(parent_ty); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { @@ -3888,7 +3971,7 @@ pub const Object = struct { return field_addr.constIntToPtr(final_llvm_ty); } - const parent_llvm_ty = try o.lowerType(parent_ty); + const parent_llvm_ty = try o.lowerLlvmType(parent_ty); if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), @@ -3907,7 +3990,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(field_index, .False), }; - const parent_llvm_ty = try o.lowerType(parent_ty); + const parent_llvm_ty = try o.lowerLlvmType(parent_ty); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, else => unreachable, @@ -3949,9 +4032,9 @@ pub const Object = struct { try mod.markDeclAlive(decl); const llvm_decl_val = if (is_fn_body) - try o.resolveLlvmFunction(decl_index) + (try o.resolveLlvmFunction(decl_index)).toLlvm(&o.builder) else - try o.resolveGlobalDecl(decl_index); + (try o.resolveGlobalDecl(decl_index)).toLlvm(&o.builder); const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); @@ -3961,7 +4044,7 @@ pub const Object = struct { break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty); } else llvm_decl_val; - const llvm_type = try 
o.lowerType(tv.ty); + const llvm_type = try o.lowerLlvmType(tv.ty); if (tv.ty.zigTypeTag(mod) == .Int) { return llvm_val.constPtrToInt(llvm_type); } else { @@ -3976,8 +4059,8 @@ pub const Object = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = try o.lowerType(Type.usize); - const llvm_ptr_ty = try o.lowerType(ptr_ty); + const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_ptr_ty = try o.lowerLlvmType(ptr_ty); if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| { return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); } @@ -4159,20 +4242,26 @@ pub const DeclGen = struct { _ = try o.resolveLlvmFunction(extern_func.decl); } else { const target = mod.getTarget(); - var global = try o.resolveGlobalDecl(decl_index); - global.setAlignment(decl.getAlignment(mod)); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s); + const object_index = try o.resolveGlobalDecl(decl_index); + const object = object_index.ptr(&o.builder); + const global = object.global.ptr(&o.builder); + var llvm_global = object.global.toLlvm(&o.builder); + global.alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); + llvm_global.setAlignment(decl.getAlignment(mod)); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| llvm_global.setSection(s); assert(decl.has_tv); const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { + object.mutability = .global; break :init_val variable.init; } else init_val: { - global.setGlobalConstant(.True); + object.mutability = .constant; + llvm_global.setGlobalConstant(.True); break :init_val decl.val.toIntern(); }; if (init_val != .none) { const llvm_init = try o.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() }); - if (global.globalGetValueType() == llvm_init.typeOf()) { - global.setInitializer(llvm_init); + if (llvm_global.globalGetValueType() == llvm_init.typeOf()) { + llvm_global.setInitializer(llvm_init); } else { // LLVM does not allow us to change the type of globals. So we must // create a new global with the correct type, copy all its attributes, @@ -4193,18 +4282,18 @@ pub const DeclGen = struct { "", llvm_global_addrspace, ); - new_global.setLinkage(global.getLinkage()); - new_global.setUnnamedAddr(global.getUnnamedAddress()); - new_global.setAlignment(global.getAlignment()); + new_global.setLinkage(llvm_global.getLinkage()); + new_global.setUnnamedAddr(llvm_global.getUnnamedAddress()); + new_global.setAlignment(llvm_global.getAlignment()); if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| new_global.setSection(s); new_global.setInitializer(llvm_init); // TODO: How should this work then the address space of a global changed? 
- global.replaceAllUsesWith(new_global); - o.decl_map.putAssumeCapacity(decl_index, new_global); - new_global.takeName(global); - global.deleteGlobal(); - global = new_global; + llvm_global.replaceAllUsesWith(new_global); + new_global.takeName(llvm_global); + o.builder.llvm_globals.items[@intFromEnum(object.global)] = new_global; + llvm_global.deleteGlobal(); + llvm_global = new_global; } } @@ -4216,7 +4305,7 @@ pub const DeclGen = struct { const di_global = dib.createGlobalVariableExpression( di_file.toScope(), mod.intern_pool.stringToSlice(decl.name), - global.getValueName(), + llvm_global.getValueName(), di_file, line_number, try o.lowerDebugType(decl.ty, .full), @@ -4224,7 +4313,7 @@ pub const DeclGen = struct { ); try o.di_map.put(o.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) llvm_global.attachMetaData(di_global); } } } @@ -4618,7 +4707,7 @@ pub const FuncGen = struct { defer llvm_args.deinit(); const ret_ptr = if (!sret) null else blk: { - const llvm_ret_ty = try o.lowerType(return_type); + const llvm_ret_ty = try o.lowerLlvmType(return_type); const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); try llvm_args.append(ret_ptr); break :blk ret_ptr; @@ -4637,7 +4726,7 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const llvm_param_ty = try o.lowerType(param_ty); + const llvm_param_ty = try o.lowerLlvmType(param_ty); if (isByRef(param_ty, mod)) { const alignment = param_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, ""); @@ -4668,7 +4757,7 @@ pub const FuncGen = struct { const llvm_arg = try self.resolveInst(arg); const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); if (isByRef(param_ty, mod)) { const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); @@ -4759,7 +4848,7 @@ pub const FuncGen = struct { llvm_arg = store_inst; } - const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); + const float_ty = try o.lowerLlvmType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); const array_llvm_ty = float_ty.arrayType(count); const alignment = arg_ty.abiAlignment(mod); @@ -4788,7 +4877,7 @@ pub const FuncGen = struct { }; const call = self.builder.buildCall( - try o.lowerType(zig_fn_ty), + try o.lowerLlvmType(zig_fn_ty), llvm_fn, llvm_args.items.ptr, @as(c_uint, @intCast(llvm_args.items.len)), @@ -4813,7 +4902,7 @@ pub const FuncGen = struct { .byref => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); - const param_llvm_ty = try o.lowerType(param_ty); + const param_llvm_ty = try o.lowerLlvmType(param_ty); const alignment = param_ty.abiAlignment(mod); o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, @@ -4862,7 +4951,7 @@ pub const FuncGen = struct { return null; } - const llvm_ret_ty = try o.lowerType(return_type); + const llvm_ret_ty = try o.lowerLlvmType(return_type); if (ret_ptr) |rp| { call.setCallSret(llvm_ret_ty); @@ -4939,8 +5028,8 @@ pub const FuncGen = struct { const fn_info = mod.typeToFunc(panic_decl.ty).?; const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); _ = 
fg.builder.buildCall( - try o.lowerType(panic_decl.ty), - panic_global, + try o.lowerLlvmType(panic_decl.ty), + panic_global.toLlvm(&o.builder), &args, args.len, toLlvmCallConv(fn_info.cc, target), @@ -4968,7 +5057,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try o.lowerType(Type.anyerror); + const err_int = try o.lowerLlvmType(Type.anyerror); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -5016,7 +5105,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try o.lowerType(Type.anyerror); + const err_int = try o.lowerLlvmType(Type.anyerror); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -5040,7 +5129,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = self.air.getRefType(ty_op.ty); - const llvm_arg_ty = try o.lowerType(arg_ty); + const llvm_arg_ty = try o.lowerLlvmType(arg_ty); return self.builder.buildVAArg(list, llvm_arg_ty, ""); } @@ -5050,7 +5139,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = self.air.getRefType(ty_op.ty); - const llvm_va_list_ty = try o.lowerType(va_list_ty); + const llvm_va_list_ty = try o.lowerLlvmType(va_list_ty); const mod = o.module; const result_alignment = va_list_ty.abiAlignment(mod); @@ -5098,7 +5187,7 @@ pub const FuncGen = struct { const o = self.dg.object; const mod = o.module; const va_list_ty = self.typeOfIndex(inst); - const llvm_va_list_ty = try o.lowerType(va_list_ty); + const llvm_va_list_ty = try o.lowerLlvmType(va_list_ty); const result_alignment = va_list_ty.abiAlignment(mod); const list = self.buildAlloca(llvm_va_list_ty, result_alignment); @@ -5177,7 +5266,7 @@ pub const FuncGen = struct { // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. 
const is_by_ref = isByRef(scalar_ty, mod); - const opt_llvm_ty = try o.lowerType(scalar_ty); + const opt_llvm_ty = try o.lowerLlvmType(scalar_ty); const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); const llvm_i2 = self.context.intType(2); @@ -5287,7 +5376,7 @@ pub const FuncGen = struct { const is_body = inst_ty.zigTypeTag(mod) == .Fn; if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - const raw_llvm_ty = try o.lowerType(inst_ty); + const raw_llvm_ty = try o.lowerLlvmType(inst_ty); const llvm_ty = ty: { // If the zig tag type is a function, this represents an actual function body; not @@ -5392,11 +5481,11 @@ pub const FuncGen = struct { const mod = o.module; const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { - const err_set_ty = try o.lowerType(Type.anyerror); + const err_set_ty = try o.lowerLlvmType(Type.anyerror); const zero = err_set_ty.constNull(); if (!payload_has_bits) { // TODO add alignment to this load @@ -5531,9 +5620,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); - const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); + const slice_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); @@ -5542,7 +5631,7 @@ pub const FuncGen = struct { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), llvm_usize.constNull(), }; - const array_llvm_ty = try o.lowerType(array_ty); + const array_llvm_ty = try o.lowerLlvmType(array_ty); const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, ""); const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); @@ -5559,7 +5648,7 @@ pub const FuncGen = struct { const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = try o.lowerType(dest_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); const target = mod.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { @@ -5600,7 +5689,7 @@ pub const FuncGen = struct { param_types = [1]*llvm.Type{v2i64}; } - const libc_fn = self.getLibcFunction(fn_name, ¶m_types, dest_llvm_ty); + const libc_fn = try self.getLibcFunction(fn_name, ¶m_types, dest_llvm_ty); const params = [1]*llvm.Value{extended}; return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); @@ -5620,7 +5709,7 @@ pub const FuncGen = struct { const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = try o.lowerType(dest_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag @@ -5652,9 +5741,9 @@ pub const 
FuncGen = struct { compiler_rt_dest_abbrev, }) catch unreachable; - const operand_llvm_ty = try o.lowerType(operand_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); const param_types = [1]*llvm.Type{operand_llvm_ty}; - const libc_fn = self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); + const libc_fn = try self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); const params = [1]*llvm.Value{operand}; var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); @@ -5762,7 +5851,7 @@ pub const FuncGen = struct { const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const array_llvm_ty = try o.lowerType(array_ty); + const array_llvm_ty = try o.lowerLlvmType(array_ty); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; @@ -5773,7 +5862,7 @@ pub const FuncGen = struct { return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false); } else { - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); if (Air.refToIndex(bin_op.lhs)) |lhs_index| { if (self.air.instructions.items(.tag)[lhs_index] == .load) { const load_data = self.air.instructions.items(.data)[lhs_index]; @@ -5898,7 +5987,7 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try o.lowerType(field_ty); + const elem_llvm_ty = try o.lowerLlvmType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); @@ -5920,7 +6009,7 @@ pub const FuncGen = struct { .Union => { assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; - const elem_llvm_ty = try o.lowerType(field_ty); + const elem_llvm_ty = try o.lowerLlvmType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); @@ -5942,7 +6031,7 @@ pub const FuncGen = struct { .Struct => { assert(struct_ty.containerLayout(mod) != .Packed); const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = try o.lowerType(struct_ty); + const struct_llvm_ty = try o.lowerLlvmType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), @@ -5961,11 +6050,11 @@ pub const FuncGen = struct { } }, .Union => { - const union_llvm_ty = try o.lowerType(struct_ty); + const union_llvm_ty = try o.lowerLlvmType(struct_ty); const layout = struct_ty.unionGetLayout(mod); const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); - const llvm_field_ty = try o.lowerType(field_ty); + const llvm_field_ty = try o.lowerLlvmType(field_ty); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -5991,7 +6080,7 @@ pub const FuncGen = struct { const parent_ty = 
self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); - const res_ty = try o.lowerType(self.air.getRefType(ty_pl.ty)); + const res_ty = try o.lowerLlvmType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { return field_ptr; } @@ -6273,7 +6362,7 @@ pub const FuncGen = struct { } } else { const ret_ty = self.typeOfIndex(inst); - llvm_ret_types[llvm_ret_i] = try o.lowerType(ret_ty); + llvm_ret_types[llvm_ret_i] = try o.lowerLlvmType(ret_ty); llvm_ret_i += 1; } @@ -6316,7 +6405,7 @@ pub const FuncGen = struct { llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); } else { const alignment = arg_ty.abiAlignment(mod); - const arg_llvm_ty = try o.lowerType(arg_ty); + const arg_llvm_ty = try o.lowerLlvmType(arg_ty); const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, ""); load_inst.setAlignment(alignment); llvm_param_values[llvm_param_i] = load_inst; @@ -6554,7 +6643,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const optional_llvm_ty = try o.lowerType(optional_ty); + const optional_llvm_ty = try o.lowerLlvmType(optional_ty); const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) @@ -6563,7 +6652,7 @@ pub const FuncGen = struct { operand; if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - const ptr_ty = try o.lowerType(payload_ty.slicePtrFieldType(mod)); + const ptr_ty = try o.lowerLlvmType(payload_ty.slicePtrFieldType(mod)); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -6602,7 +6691,7 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(mod); - const err_set_ty = try o.lowerType(Type.anyerror); + const err_set_ty = try o.lowerLlvmType(Type.anyerror); const zero = err_set_ty.constNull(); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { @@ -6616,7 +6705,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(try o.lowerType(err_union_ty), operand, "") + self.builder.buildLoad(try o.lowerLlvmType(err_union_ty), operand, "") else operand; return self.builder.buildICmp(op, loaded, zero, ""); @@ -6625,7 +6714,7 @@ pub const FuncGen = struct { const err_field_index = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, ""); return self.builder.buildICmp(op, loaded, zero, ""); @@ -6651,7 +6740,7 @@ pub const FuncGen = struct { // The payload and the optional are the same value. 
return operand; } - const optional_llvm_ty = try o.lowerType(optional_ty); + const optional_llvm_ty = try o.lowerLlvmType(optional_ty); return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); } @@ -6677,7 +6766,7 @@ pub const FuncGen = struct { } // First set the non-null bit. - const optional_llvm_ty = try o.lowerType(optional_ty); + const optional_llvm_ty = try o.lowerLlvmType(optional_ty); const non_null_ptr = self.builder.buildStructGEP(optional_llvm_ty, operand, 1, ""); // TODO set alignment on this store _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -6704,7 +6793,7 @@ pub const FuncGen = struct { return operand; } - const opt_llvm_ty = try o.lowerType(optional_ty); + const opt_llvm_ty = try o.lowerLlvmType(optional_ty); const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -6728,7 +6817,7 @@ pub const FuncGen = struct { return if (operand_is_ptr) operand else null; } const offset = errUnionPayloadOffset(payload_ty, mod); - const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); if (operand_is_ptr) { return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); } else if (isByRef(err_union_ty, mod)) { @@ -6758,7 +6847,7 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const err_llvm_ty = try o.lowerType(Type.anyerror); + const err_llvm_ty = try o.lowerLlvmType(Type.anyerror); if (operand_is_ptr) { return operand; } else { @@ -6766,7 +6855,7 @@ pub const FuncGen = struct { } } - const err_set_llvm_ty = try o.lowerType(Type.anyerror); + const err_set_llvm_ty = try o.lowerLlvmType(Type.anyerror); const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -6777,7 +6866,7 @@ pub const FuncGen = struct { const offset = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); } @@ -6798,7 +6887,7 @@ pub const FuncGen = struct { _ = self.builder.buildStore(non_error_val, operand); return operand; } - const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); { const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. 
@@ -6834,7 +6923,7 @@ pub const FuncGen = struct { const mod = o.module; const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = try o.lowerType(struct_ty); + const struct_llvm_ty = try o.lowerLlvmType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), @@ -6858,7 +6947,7 @@ pub const FuncGen = struct { if (optional_ty.optionalReprIsPayload(mod)) { return operand; } - const llvm_optional_ty = try o.lowerType(optional_ty); + const llvm_optional_ty = try o.lowerLlvmType(optional_ty); if (isByRef(optional_ty, mod)) { const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); @@ -6882,8 +6971,8 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const ok_err_code = (try o.lowerType(Type.anyerror)).constNull(); - const err_un_llvm_ty = try o.lowerType(err_un_ty); + const ok_err_code = (try o.lowerLlvmType(Type.anyerror)).constNull(); + const err_un_llvm_ty = try o.lowerLlvmType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); @@ -6912,7 +7001,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const err_un_llvm_ty = try o.lowerType(err_un_ty); + const err_un_llvm_ty = try o.lowerLlvmType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); @@ -6968,7 +7057,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); + const elem_llvm_ty = try o.lowerLlvmType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); @@ -7012,7 +7101,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const llvm_slice_ty = try o.lowerType(inst_ty); + const llvm_slice_ty = try o.lowerLlvmType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` // but `ptr` is pointing to the global directly. 
@@ -7056,7 +7145,7 @@ pub const FuncGen = struct { true => signed_intrinsic, false => unsigned_intrinsic, }; - const llvm_inst_ty = try o.lowerType(inst_ty); + const llvm_inst_ty = try o.lowerLlvmType(inst_ty); const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); const result_struct = fg.builder.buildCall( llvm_fn.globalGetValueType(), @@ -7229,11 +7318,11 @@ pub const FuncGen = struct { return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } if (scalar_ty.isSignedInt(mod)) { - const inst_llvm_ty = try o.lowerType(inst_ty); + const inst_llvm_ty = try o.lowerLlvmType(inst_ty); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); @@ -7295,7 +7384,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const inst_llvm_ty = try o.lowerType(inst_ty); + const inst_llvm_ty = try o.lowerLlvmType(inst_ty); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { @@ -7310,7 +7399,7 @@ pub const FuncGen = struct { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); @@ -7408,8 +7497,8 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = try o.lowerType(lhs_ty); - const llvm_dest_ty = try o.lowerType(dest_ty); + const llvm_lhs_ty = try o.lowerLlvmType(lhs_ty); + const llvm_dest_ty = try o.lowerLlvmType(dest_ty); const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); @@ -7472,15 +7561,30 @@ pub const FuncGen = struct { fn_name: [:0]const u8, param_types: []const *llvm.Type, return_type: *llvm.Type, - ) *llvm.Value { + ) Allocator.Error!*llvm.Value { const o = self.dg.object; return o.llvm_module.getNamedFunction(fn_name.ptr) orelse b: { const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len); break :b if (alias) |a| a.getAliasee() else null; } orelse b: { + const name = try o.builder.string(fn_name); + const params_len = @as(c_uint, @intCast(param_types.len)); const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False); - const f = o.llvm_module.addFunction(fn_name, fn_type); + const f = o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type); + + var global = Builder.Global{ + .type = .void, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; + + try o.builder.llvm_globals.append(self.gpa, f); + _ = try o.builder.addGlobal(name, global); + try o.builder.functions.append(self.gpa, function); + break :b f; }; } @@ -7497,7 +7601,7 @@ pub const FuncGen = struct { const mod = o.module; const target = 
o.module.getTarget(); const scalar_ty = ty.scalarType(mod); - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { const llvm_predicate: llvm.RealPredicate = switch (pred) { @@ -7528,7 +7632,7 @@ const param_types = [2]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty }; const llvm_i32 = self.context.intType(32); - const libc_fn = self.getLibcFunction(fn_name, param_types[0..], llvm_i32); + const libc_fn = try self.getLibcFunction(fn_name, param_types[0..], llvm_i32); const zero = llvm_i32.constInt(0, .False); const int_pred: llvm.IntPredicate = switch (pred) { @@ -7600,8 +7704,8 @@ const mod = o.module; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); - const llvm_ty = try o.lowerType(ty); - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const llvm_ty = try o.lowerLlvmType(ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); var fn_name_buf: [64]u8 = undefined; @@ -7672,7 +7776,7 @@ .intrinsic => |fn_name| self.getIntrinsic(fn_name, &.{llvm_ty}), .libc => |fn_name| b: { const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty }; - const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); + const libc_fn = try self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); if (ty.zigTypeTag(mod) == .Vector) { const result = llvm_ty.getUndef(); return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod)); } @@ -7711,10 +7815,10 @@ const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.typeOfIndex(inst); - const llvm_dest_ty = try o.lowerType(dest_ty); + const llvm_dest_ty = try o.lowerLlvmType(dest_ty); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") + self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") else rhs; @@ -7785,7 +7889,7 @@ const rhs_scalar_ty = rhs_ty.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") + self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") else rhs; if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); @@ -7806,7 +7910,7 @@ const rhs_scalar_ty = rhs_type.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_type), "") + self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_type), "") else rhs; return self.builder.buildShl(lhs, casted_rhs, ""); @@ -7841,7 +7945,7 @@ // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates.
- const lhs_scalar_llvm_ty = try o.lowerType(lhs_scalar_ty); + const lhs_scalar_llvm_ty = try o.lowerLlvmType(lhs_scalar_ty); const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); if (rhs_ty.zigTypeTag(mod) == .Vector) { @@ -7870,7 +7974,7 @@ const rhs_scalar_ty = rhs_ty.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") + self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") else rhs; const is_signed_int = lhs_scalar_ty.isSignedInt(mod); @@ -7896,7 +8000,7 @@ const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); - const dest_llvm_ty = try o.lowerType(dest_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(mod); @@ -7917,7 +8021,7 @@ const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); + const dest_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } @@ -7933,11 +8037,11 @@ const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerType(dest_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); } else { - const operand_llvm_ty = try o.lowerType(operand_ty); - const dest_llvm_ty = try o.lowerType(dest_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{ @@ -7946,7 +8050,7 @@ const params = [1]*llvm.Value{operand}; const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + const llvm_fn = try self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); } @@ -7964,11 +8068,11 @@ const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerType(dest_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } else { - const operand_llvm_ty = try o.lowerType(operand_ty); - const dest_llvm_ty = try o.lowerType(dest_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const dest_llvm_ty = try o.lowerLlvmType(dest_ty); var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{ @@ -7977,7 +8081,7 @@ const params = [1]*llvm.Value{operand}; const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + const llvm_fn = try self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); return
self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); } @@ -7989,7 +8093,7 @@ const operand = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty); - const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); + const dest_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, ""); } @@ -8006,7 +8110,7 @@ const mod = o.module; const operand_is_ref = isByRef(operand_ty, mod); const result_is_ref = isByRef(inst_ty, mod); - const llvm_dest_ty = try o.lowerType(inst_ty); + const llvm_dest_ty = try o.lowerLlvmType(inst_ty); if (operand_is_ref and result_is_ref) { // They are both pointers, so just return the same opaque pointer :) @@ -8036,7 +8140,7 @@ } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); @@ -8053,7 +8157,7 @@ return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(mod); - const llvm_vector_ty = try o.lowerType(inst_ty); + const llvm_vector_ty = try o.lowerLlvmType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } @@ -8068,9 +8172,9 @@ } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement.
- const array_llvm_ty = try o.lowerType(operand_ty); - const elem_llvm_ty = try o.lowerType(elem_ty); - const llvm_usize = try o.lowerType(Type.usize); + const array_llvm_ty = try o.lowerLlvmType(operand_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const llvm_usize = try o.lowerLlvmType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); @@ -8179,7 +8283,7 @@ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); - const pointee_llvm_ty = try o.lowerType(pointee_type); + const pointee_llvm_ty = try o.lowerLlvmType(pointee_type); const alignment = ptr_ty.ptrAlignment(mod); return self.buildAlloca(pointee_llvm_ty, alignment); } @@ -8191,7 +8295,7 @@ const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; - const ret_llvm_ty = try o.lowerType(ret_ty); + const ret_llvm_ty = try o.lowerLlvmType(ret_ty); return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); } @@ -8223,7 +8327,7 @@ else u8_llvm_ty.getUndef(); const operand_size = operand_ty.abiSize(mod); - const usize_llvm_ty = try o.lowerType(Type.usize); + const usize_llvm_ty = try o.lowerLlvmType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); @@ -8296,7 +8400,7 @@ _ = inst; const o = self.dg.object; const mod = o.module; - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 @@ -8324,7 +8428,7 @@ const params = [_]*llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -8370,7 +8474,7 @@ var payload = self.builder.buildExtractValue(result, 0, ""); if (opt_abi_ty != null) { - payload = self.builder.buildTrunc(payload, try o.lowerType(operand_ty), ""); + payload = self.builder.buildTrunc(payload, try o.lowerLlvmType(operand_ty), ""); } const success_bit = self.builder.buildExtractValue(result, 1, ""); @@ -8415,7 +8519,7 @@ ordering, single_threaded, ); - const operand_llvm_ty = try o.lowerType(operand_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); if (is_float) { return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); } else { @@ -8428,7 +8532,7 @@ } // It's a pointer but we need to treat it as an int.
- const usize_llvm_ty = try o.lowerType(Type.usize); + const usize_llvm_ty = try o.lowerLlvmType(Type.usize); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, @@ -8437,7 +8541,7 @@ ordering, single_threaded, ); - const operand_llvm_ty = try o.lowerType(operand_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); } @@ -8456,7 +8560,7 @@ const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse ptr_info.child.toType().abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); if (opt_abi_llvm_ty) |abi_llvm_ty| { // operand needs widening and truncating @@ -8606,7 +8710,7 @@ .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), .Many, .C => unreachable, }; - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); const len_gep = [_]*llvm.Value{len}; const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); _ = self.builder.buildBr(loop_block); @@ -8731,7 +8835,7 @@ _ = self.builder.buildStore(new_tag, union_ptr); return null; } - const un_llvm_ty = try o.lowerType(un_ty); + const un_llvm_ty = try o.lowerLlvmType(un_ty); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); const tag_field_ptr = self.builder.buildStructGEP(un_llvm_ty, union_ptr, tag_index, ""); // TODO alignment on this store @@ -8748,7 +8852,7 @@ if (layout.tag_size == 0) return null; const union_handle = try self.resolveInst(ty_op.operand); if (isByRef(un_ty, mod)) { - const llvm_un_ty = try o.lowerType(un_ty); + const llvm_un_ty = try o.lowerLlvmType(un_ty); if (layout.payload_size == 0) { return self.builder.buildLoad(llvm_un_ty, union_handle, ""); } @@ -8790,13 +8894,13 @@ const operand = try self.resolveInst(ty_op.operand); const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = try o.lowerType(operand_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); + const result_llvm_ty = try o.lowerLlvmType(result_ty); const bits = operand_ty.intInfo(mod).bits; const result_bits = result_ty.intInfo(mod).bits; @@ -8817,12 +8921,12 @@ const operand = try self.resolveInst(ty_op.operand); const params = [_]*llvm.Value{operand}; - const operand_llvm_ty = try o.lowerType(operand_ty); + const operand_llvm_ty = try o.lowerLlvmType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); + const result_llvm_ty = try o.lowerLlvmType(result_ty); const bits = operand_ty.intInfo(mod).bits;
const result_bits = result_ty.intInfo(mod).bits; @@ -8844,7 +8948,7 @@ assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try o.lowerType(operand_ty); + var operand_llvm_ty = try o.lowerLlvmType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte @@ -8878,7 +8982,7 @@ const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); + const result_llvm_ty = try o.lowerLlvmType(result_ty); const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); @@ -8957,9 +9061,9 @@ const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}); - const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())}; + const param_types = [_]*llvm.Type{try o.lowerLlvmType(enum_type.tag_ty.toType())}; - const llvm_ret_ty = try o.lowerType(Type.bool); + const llvm_ret_ty = try o.lowerLlvmType(Type.bool); const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); fn_val.setLinkage(.Internal); @@ -9020,29 +9124,32 @@ // TODO: detect when the type changes and re-emit this function. const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.toLlvm(&o.builder); errdefer assert(o.decl_map.remove(enum_type.decl)); - var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); + const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); const slice_ty = Type.slice_const_u8_sentinel_0; - const llvm_ret_ty = try o.lowerType(slice_ty); - const usize_llvm_ty = try o.lowerType(Type.usize); + const llvm_ret_ty = try o.lowerLlvmType(slice_ty); + const usize_llvm_ty = try o.lowerLlvmType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); - const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())}; + const param_types = [_]*llvm.Type{try o.lowerLlvmType(enum_type.tag_ty.toType())}; const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); - const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); o.addCommonFnAttributes(fn_val); - gop.value_ptr.* = fn_val; + + var global = Builder.Global{ + .type = .void, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; const prev_block = self.builder.getInsertBlock(); const prev_debug_location = self.builder.getCurrentDebugLocation2(); @@ -9104,6 +9211,10 @@
self.builder.positionBuilderAtEnd(bad_value_block); _ = self.builder.buildUnreachable(); + + try o.builder.llvm_globals.append(self.gpa, fn_val); + gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); return fn_val; } @@ -9116,8 +9227,8 @@ // Function signature: fn (anyerror) bool - const ret_llvm_ty = try o.lowerType(Type.bool); - const anyerror_llvm_ty = try o.lowerType(Type.anyerror); + const ret_llvm_ty = try o.lowerLlvmType(Type.bool); + const anyerror_llvm_ty = try o.lowerLlvmType(Type.anyerror); const param_types = [_]*llvm.Type{anyerror_llvm_ty}; const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False); @@ -9133,7 +9244,7 @@ const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const slice_ty = self.typeOfIndex(inst); - const slice_llvm_ty = try o.lowerType(slice_ty); + const slice_llvm_ty = try o.lowerLlvmType(slice_ty); const error_name_table_ptr = try self.getErrorNameTable(); const ptr_slice_llvm_ty = self.context.pointerType(0); @@ -9219,7 +9330,7 @@ accum_init: *llvm.Value, ) !*llvm.Value { const o = self.dg.object; - const llvm_usize_ty = try o.lowerType(Type.usize); + const llvm_usize_ty = try o.lowerLlvmType(Type.usize); const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False); const llvm_result_ty = accum_init.typeOf(); @@ -9296,7 +9407,7 @@ .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildAddReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(-0.0); return self.builder.buildFPAddReduce(neutral_value, operand); }, @@ -9305,7 +9416,7 @@ .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildMulReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); + const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(1.0); return self.builder.buildFPMulReduce(neutral_value, operand); }, @@ -9333,9 +9444,9 @@ else => unreachable, }; - const param_llvm_ty = try o.lowerType(scalar_ty); + const param_llvm_ty = try o.lowerLlvmType(scalar_ty); const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty }; - const libc_fn = self.getLibcFunction(fn_name, &param_types, param_llvm_ty); + const libc_fn = try self.getLibcFunction(fn_name, &param_types, param_llvm_ty); const init_value = try o.lowerValue(.{ .ty = scalar_ty, .val = try mod.floatValue(scalar_ty, switch (reduce.operation) { @@ -9356,7 +9467,7 @@ const result_ty = self.typeOfIndex(inst); const len = @as(usize, @intCast(result_ty.arrayLen(mod))); const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); - const llvm_result_ty = try o.lowerType(result_ty); + const llvm_result_ty = try o.lowerLlvmType(result_ty); switch (result_ty.zigTypeTag(mod)) { .Vector => { @@ -9444,7 +9555,7 @@ .Array => { assert(isByRef(result_ty, mod)); - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty,
result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); @@ -9487,7 +9598,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); - const union_llvm_ty = try o.lowerType(union_ty); + const union_llvm_ty = try o.lowerLlvmType(union_ty); const layout = union_ty.unionGetLayout(mod); const union_obj = mod.typeToUnion(union_ty).?; @@ -9529,7 +9640,7 @@ pub const FuncGen = struct { const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; - const field_llvm_ty = try o.lowerType(field.ty); + const field_llvm_ty = try o.lowerLlvmType(field.ty); const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); @@ -9552,7 +9663,7 @@ pub const FuncGen = struct { const fields: [1]*llvm.Type = .{payload}; break :t self.context.structType(&fields, fields.len, .False); } - const tag_llvm_ty = try o.lowerType(union_obj.tag_ty); + const tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); var fields: [3]*llvm.Type = undefined; var fields_len: c_uint = 2; if (layout.tag_align >= layout.payload_align) { @@ -9605,7 +9716,7 @@ pub const FuncGen = struct { index_type.constInt(@intFromBool(layout.tag_align < layout.payload_align), .False), }; const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); - const tag_llvm_ty = try o.lowerType(union_obj.tag_ty); + const tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); @@ -9687,7 +9798,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const llvm_dest_ty = try o.lowerType(inst_ty); + const llvm_dest_ty = try o.lowerLlvmType(inst_ty); return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, ""); } @@ -9821,7 +9932,7 @@ pub const FuncGen = struct { return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const payload_llvm_ty = try o.lowerType(payload_ty); + const payload_llvm_ty = try o.lowerLlvmType(payload_ty); const load_inst = fg.builder.buildLoad(payload_llvm_ty, payload_ptr, ""); load_inst.setAlignment(payload_alignment); return load_inst; @@ -9838,7 +9949,7 @@ pub const FuncGen = struct { non_null_bit: *llvm.Value, ) !?*llvm.Value { const o = self.dg.object; - const optional_llvm_ty = try o.lowerType(optional_ty); + const optional_llvm_ty = try o.lowerLlvmType(optional_ty); const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); const mod = o.module; @@ -9893,7 +10004,7 @@ pub const FuncGen = struct { const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = self.context.intType(8); - const llvm_usize = try o.lowerType(Type.usize); + const llvm_usize = try o.lowerLlvmType(Type.usize); const llvm_index = llvm_usize.constInt(byte_offset, .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, ""); @@ -9919,7 +10030,7 @@ pub const FuncGen = struct { const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == 
.Packed) return struct_ptr; const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); - const union_llvm_ty = try o.lowerType(struct_ty); + const union_llvm_ty = try o.lowerLlvmType(struct_ty); const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); return union_field_ptr; }, @@ -9944,7 +10055,7 @@ pub const FuncGen = struct { ) !*llvm.Value { const o = fg.dg.object; const mod = o.module; - const pointee_llvm_ty = try o.lowerType(pointee_type); + const pointee_llvm_ty = try o.lowerLlvmType(pointee_type); const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); @@ -9977,7 +10088,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = try o.lowerType(elem_ty); + const vec_elem_ty = try o.lowerLlvmType(elem_ty); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); @@ -9991,7 +10102,7 @@ pub const FuncGen = struct { if (isByRef(elem_ty, mod)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile); } - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); llvm_inst.setAlignment(ptr_alignment); llvm_inst.setVolatile(ptr_volatile); @@ -10006,7 +10117,7 @@ pub const FuncGen = struct { const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try o.lowerType(elem_ty); + const elem_llvm_ty = try o.lowerLlvmType(elem_ty); if (isByRef(elem_ty, mod)) { const result_align = elem_ty.abiAlignment(mod); @@ -10054,7 +10165,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = try o.lowerType(elem_ty); + const vec_elem_ty = try o.lowerLlvmType(elem_ty); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); @@ -10702,7 +10813,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
if (return_type.isError(mod)) { - return o.lowerType(Type.anyerror); + return o.lowerLlvmType(Type.anyerror); } else { return o.context.voidType(); } @@ -10713,19 +10824,19 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { if (isByRef(return_type, mod)) { return o.context.voidType(); } else { - return o.lowerType(return_type); + return o.lowerLlvmType(return_type); } }, .C => { switch (target.cpu.arch) { - .mips, .mipsel => return o.lowerType(return_type), + .mips, .mipsel => return o.lowerLlvmType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), else => return lowerSystemVFnRetTy(o, fn_info), }, .wasm32 => { if (isScalar(mod, return_type)) { - return o.lowerType(return_type); + return o.lowerLlvmType(return_type); } const classes = wasm_c_abi.classifyType(return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { @@ -10740,8 +10851,8 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { .aarch64, .aarch64_be => { switch (aarch64_c_abi.classifyType(return_type, mod)) { .memory => return o.context.voidType(), - .float_array => return o.lowerType(return_type), - .byval => return o.lowerType(return_type), + .float_array => return o.lowerLlvmType(return_type), + .byval => return o.lowerLlvmType(return_type), .integer => { const bit_size = return_type.bitSize(mod); return o.context.intType(@as(c_uint, @intCast(bit_size))); @@ -10757,7 +10868,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { } else { return o.context.voidType(); }, - .byval => return o.lowerType(return_type), + .byval => return o.lowerLlvmType(return_type), } }, .riscv32, .riscv64 => { @@ -10774,23 +10885,23 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { }; return o.context.structType(&llvm_types_buffer, 2, .False); }, - .byval => return o.lowerType(return_type), + .byval => return o.lowerLlvmType(return_type), } }, // TODO investigate C ABI for other architectures - else => return o.lowerType(return_type), + else => return o.lowerLlvmType(return_type), } }, .Win64 => return lowerWin64FnRetTy(o, fn_info), .SysV => return lowerSystemVFnRetTy(o, fn_info), .Stdcall => { if (isScalar(mod, return_type)) { - return o.lowerType(return_type); + return o.lowerLlvmType(return_type); } else { return o.context.voidType(); } }, - else => return o.lowerType(return_type), + else => return o.lowerLlvmType(return_type), } } @@ -10800,7 +10911,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { switch (x86_64_abi.classifyWindows(return_type, mod)) { .integer => { if (isScalar(mod, return_type)) { - return o.lowerType(return_type); + return o.lowerLlvmType(return_type); } else { const abi_size = return_type.abiSize(mod); return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); @@ -10808,7 +10919,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { }, .win_i128 => return o.context.intType(64).vectorType(2), .memory => return o.context.voidType(), - .sse => return o.lowerType(return_type), + .sse => return o.lowerLlvmType(return_type), else => unreachable, } } @@ -10817,7 +10928,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type const mod = o.module; const return_type = fn_info.return_type.toType(); if (isScalar(mod, return_type)) { - return o.lowerType(return_type); + return o.lowerLlvmType(return_type); } const classes = x86_64_abi.classifySystemV(return_type, 
mod, .ret); if (classes[0] == .memory) { @@ -10847,7 +10958,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type if (llvm_types_index != 0 or classes[2] != .none) { return o.context.voidType(); } - llvm_types_buffer[llvm_types_index] = o.context.x86FP80Type(); + llvm_types_buffer[llvm_types_index] = o.context.x86_fp80Type(); llvm_types_index += 1; }, .x87up => continue, diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig new file mode 100644 index 0000000000..2aff03a042 --- /dev/null +++ b/src/codegen/llvm/Builder.zig @@ -0,0 +1,845 @@ +gpa: Allocator, +use_lib_llvm: bool, + +llvm_context: *llvm.Context, +llvm_module: *llvm.Module, +di_builder: ?*llvm.DIBuilder = null, +llvm_types: std.ArrayListUnmanaged(*llvm.Type) = .{}, +llvm_globals: std.ArrayListUnmanaged(*llvm.Value) = .{}, + +source_filename: String = .none, +data_layout: String = .none, +target_triple: String = .none, + +string_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +string_bytes: std.ArrayListUnmanaged(u8) = .{}, +string_indices: std.ArrayListUnmanaged(u32) = .{}, + +types: std.AutoArrayHashMapUnmanaged(String, Type) = .{}, +next_unnamed_type: String = @enumFromInt(0), +type_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +type_data: std.ArrayListUnmanaged(Type.Data) = .{}, +type_extra: std.ArrayListUnmanaged(u32) = .{}, + +globals: std.AutoArrayHashMapUnmanaged(String, Global) = .{}, +next_unnamed_global: String = @enumFromInt(0), +next_unique_global_id: std.AutoHashMapUnmanaged(String, u32) = .{}, +aliases: std.ArrayListUnmanaged(Alias) = .{}, +objects: std.ArrayListUnmanaged(Object) = .{}, +functions: std.ArrayListUnmanaged(Function) = .{}, + +pub const String = enum(u32) { + none = std.math.maxInt(u31), + empty, + debugme, + _, + + pub fn toSlice(self: String, b: *const Builder) ?[:0]const u8 { + const index = self.toIndex() orelse return null; + const start = b.string_indices.items[index]; + const end = b.string_indices.items[index + 1]; + return b.string_bytes.items[start .. 
end - 1 :0]; + } + + const FormatData = struct { + string: String, + builder: *const Builder, + }; + fn format( + data: FormatData, + comptime fmt_str: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + assert(data.string != .none); + const slice = data.string.toSlice(data.builder) orelse + return writer.print("{d}", .{@intFromEnum(data.string)}); + const need_quotes = if (comptime std.mem.eql(u8, fmt_str, "")) + !isValidIdentifier(slice) + else if (comptime std.mem.eql(u8, fmt_str, "\"")) + true + else + @compileError("invalid format string: '" ++ fmt_str ++ "'"); + if (need_quotes) try writer.writeByte('\"'); + for (slice) |c| switch (c) { + '\\' => try writer.writeAll("\\\\"), + ' '...'"' - 1, '"' + 1...'\\' - 1, '\\' + 1...'~' => try writer.writeByte(c), + else => try writer.print("\\{X:0>2}", .{c}), + }; + if (need_quotes) try writer.writeByte('\"'); + } + pub fn fmt(self: String, builder: *const Builder) std.fmt.Formatter(format) { + return .{ .data = .{ .string = self, .builder = builder } }; + } + + fn fromIndex(index: ?usize) String { + return @enumFromInt(@as(u32, @intCast((index orelse return .none) + + @intFromEnum(String.empty)))); + } + fn toIndex(self: String) ?usize { + return std.math.sub(u32, @intFromEnum(self), @intFromEnum(String.empty)) catch null; + } + + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: Adapter, key: []const u8) u32 { + return @truncate(std.hash.Wyhash.hash(0, key)); + } + pub fn eql(ctx: Adapter, lhs: []const u8, _: void, rhs_index: usize) bool { + return std.mem.eql(u8, lhs, String.fromIndex(rhs_index).toSlice(ctx.builder).?); + } + }; +}; + +pub const Type = enum(u32) { + void, + half, + bfloat, + float, + double, + fp128, + x86_fp80, + ppc_fp128, + x86_amx, + x86_mmx, + label, + token, + metadata, + + i1, + i8, + i16, + i32, + i64, + i128, + ptr, + + none = std.math.maxInt(u32), + _, + + const Tag = enum(u4) { + simple, + function, + integer, + pointer, + target, + vector, + vscale_vector, + array, + structure, + packed_structure, + named_structure, + }; + + const Simple = enum { + void, + half, + bfloat, + float, + double, + fp128, + x86_fp80, + ppc_fp128, + x86_amx, + x86_mmx, + label, + token, + metadata, + }; + + const NamedStructure = struct { + id: String, + child: Type, + }; + + const Data = packed struct(u32) { + tag: Tag, + data: ExtraIndex, + }; + + const ExtraIndex = u28; + + const FormatData = struct { + type: Type, + builder: *const Builder, + }; + fn format( + data: FormatData, + comptime fmt_str: []const u8, + fmt_opts: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + assert(data.type != .none); + if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name); + const type_data = data.builder.type_data.items[@intFromEnum(data.type)]; + switch (type_data.tag) { + .named_structure => { + const extra = data.builder.typeExtraData(NamedStructure, type_data.data); + if (comptime std.mem.eql(u8, fmt_str, "")) try writer.print("%{}", .{ + extra.id.fmt(data.builder), + }) else if (comptime std.mem.eql(u8, fmt_str, "+")) switch (extra.child) { + .none => try writer.writeAll("opaque"), + else => try format(.{ + .type = extra.child, + .builder = data.builder, + }, fmt_str, fmt_opts, writer), + } else @compileError("invalid format string: '" ++ fmt_str ++ "'"); + }, + else => try writer.print("", .{@intFromEnum(data.type)}), + } + } + pub fn fmt(self: Type, builder: *const Builder) std.fmt.Formatter(format) { + return .{ .data = .{ .type = 
self, .builder = builder } }; + } +}; + +pub const Linkage = enum { + default, + private, + internal, + available_externally, + linkonce, + weak, + common, + appending, + extern_weak, + linkonce_odr, + weak_odr, + external, + + pub fn format( + self: Linkage, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .default) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const Preemption = enum { + none, + dso_preemptable, + dso_local, + + pub fn format( + self: Preemption, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .none) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const Visibility = enum { + default, + hidden, + protected, + + pub fn format( + self: Visibility, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .default) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const DllStorageClass = enum { + default, + dllimport, + dllexport, + + pub fn format( + self: DllStorageClass, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .default) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const ThreadLocal = enum { + none, + generaldynamic, + localdynamic, + initialexec, + localexec, + + pub fn format( + self: ThreadLocal, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .none) return; + try writer.writeAll("thread_local"); + if (self != .generaldynamic) { + try writer.writeByte('('); + try writer.writeAll(@tagName(self)); + try writer.writeByte(')'); + } + try writer.writeByte(' '); + } +}; + +pub const UnnamedAddr = enum { + none, + unnamed_addr, + local_unnamed_addr, + + pub fn format( + self: UnnamedAddr, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .none) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const AddrSpace = enum(u24) { + none, + _, + + pub fn format( + self: AddrSpace, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .none) return; + try writer.print("addrspace({d}) ", .{@intFromEnum(self)}); + } +}; + +pub const ExternallyInitialized = enum { + none, + externally_initialized, + + pub fn format( + self: ExternallyInitialized, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self == .none) return; + try writer.writeAll(@tagName(self)); + try writer.writeByte(' '); + } +}; + +pub const Alignment = enum(u6) { + default = std.math.maxInt(u6), + _, + + pub fn fromByteUnits(bytes: u64) Alignment { + if (bytes == 0) return .default; + assert(std.math.isPowerOfTwo(bytes)); + assert(bytes <= 1 << 32); + return @enumFromInt(@ctz(bytes)); + } + + pub fn toByteUnits(self: Alignment) ?u64 { + return if (self == .default) null else @as(u64, 1) << @intFromEnum(self); + } + + pub fn format( + self: Alignment, + comptime prefix: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + try writer.print("{s} align {d}", .{ prefix, self.toByteUnits() orelse return }); + } +}; + +pub const 
Global = struct { + linkage: Linkage = .default, + preemption: Preemption = .none, + visibility: Visibility = .default, + dll_storage_class: DllStorageClass = .default, + unnamed_addr: UnnamedAddr = .none, + addr_space: AddrSpace = .none, + externally_initialized: ExternallyInitialized = .none, + type: Type, + alignment: Alignment = .default, + kind: union(enum) { + alias: Alias.Index, + object: Object.Index, + function: Function.Index, + }, + + pub const Index = enum(u32) { + _, + + pub fn ptr(self: Index, builder: *Builder) *Global { + return &builder.globals.values()[@intFromEnum(self)]; + } + + pub fn ptrConst(self: Index, builder: *const Builder) *const Global { + return &builder.globals.values()[@intFromEnum(self)]; + } + + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + return builder.llvm_globals.items[@intFromEnum(self)]; + } + + pub fn rename(self: Index, builder: *Builder, name: String) Allocator.Error!void { + try builder.ensureUnusedCapacityGlobal(name); + self.renameAssumeCapacity(builder, name); + } + + pub fn renameAssumeCapacity(self: Index, builder: *Builder, name: String) void { + const index = @intFromEnum(self); + if (builder.globals.keys()[index] == name) return; + if (builder.useLibLlvm()) builder.llvm_globals.appendAssumeCapacity(builder.llvm_globals.items[index]); + _ = builder.addGlobalAssumeCapacity(name, builder.globals.values()[index]); + if (builder.useLibLlvm()) _ = builder.llvm_globals.pop(); + builder.globals.swapRemoveAt(index); + self.updateName(builder); + } + + pub fn takeName(self: Index, builder: *Builder, other: Index) Allocator.Error!void { + try builder.ensureUnusedCapacityGlobal(.empty); + self.takeNameAssumeCapacity(builder, other); + } + + pub fn takeNameAssumeCapacity(self: Index, builder: *Builder, other: Index) void { + const other_name = builder.globals.keys()[@intFromEnum(other)]; + other.renameAssumeCapacity(builder, .none); + self.renameAssumeCapacity(builder, other_name); + } + + fn updateName(self: Index, builder: *const Builder) void { + if (!builder.useLibLlvm()) return; + const index = @intFromEnum(self); + const slice = builder.globals.keys()[index].toSlice(builder) orelse ""; + builder.llvm_globals.items[index].setValueName2(slice.ptr, slice.len); + } + }; + + fn deinit(self: *Global, _: Allocator) void { + self.* = undefined; + } +}; + +pub const Alias = struct { + global: Global.Index, + + pub const Index = enum(u32) { + _, + + pub fn ptr(self: Index, builder: *Builder) *Alias { + return &builder.aliases.items[@intFromEnum(self)]; + } + + pub fn ptrConst(self: Index, builder: *const Builder) *const Alias { + return &builder.aliases.items[@intFromEnum(self)]; + } + + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + return self.ptrConst(builder).global.toLlvm(builder); + } + }; +}; + +pub const Object = struct { + global: Global.Index, + thread_local: ThreadLocal = .none, + mutability: enum { global, constant } = .global, + init: void = {}, + + pub const Index = enum(u32) { + _, + + pub fn ptr(self: Index, builder: *Builder) *Object { + return &builder.objects.items[@intFromEnum(self)]; + } + + pub fn ptrConst(self: Index, builder: *const Builder) *const Object { + return &builder.objects.items[@intFromEnum(self)]; + } + + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + return self.ptrConst(builder).global.toLlvm(builder); + } + }; +}; + +pub const Function = struct { + global: Global.Index, + body: ?void = null, + + fn deinit(self: *Function, _: Allocator) void { + self.* 
= undefined; + } + + pub const Index = enum(u32) { + _, + + pub fn ptr(self: Index, builder: *Builder) *Function { + return &builder.functions.items[@intFromEnum(self)]; + } + + pub fn ptrConst(self: Index, builder: *const Builder) *const Function { + return &builder.functions.items[@intFromEnum(self)]; + } + + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + return self.ptrConst(builder).global.toLlvm(builder); + } + }; +}; + +pub fn init(self: *Builder) Allocator.Error!void { + try self.string_indices.append(self.gpa, 0); + assert(try self.string("") == .empty); + assert(try self.string("debugme") == .debugme); + + { + const static_len = @typeInfo(Type).Enum.fields.len - 1; + try self.type_map.ensureTotalCapacity(self.gpa, static_len); + try self.type_data.ensureTotalCapacity(self.gpa, static_len); + if (self.useLibLlvm()) try self.llvm_types.ensureTotalCapacity(self.gpa, static_len); + inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| { + const result = self.typeNoExtraAssumeCapacity(.{ + .tag = .simple, + .data = simple_field.value, + }); + assert(result.new and result.type == @field(Type, simple_field.name)); + if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + @field(llvm.Context, simple_field.name ++ "Type")(self.llvm_context), + ); + } + inline for (.{ 1, 8, 16, 32, 64, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == + @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); + inline for (.{0}) |addr_space| + assert(self.pointerTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); + } +} + +pub fn deinit(self: *Builder) void { + self.llvm_types.deinit(self.gpa); + self.llvm_globals.deinit(self.gpa); + + self.string_map.deinit(self.gpa); + self.string_bytes.deinit(self.gpa); + self.string_indices.deinit(self.gpa); + + self.types.deinit(self.gpa); + self.type_map.deinit(self.gpa); + self.type_data.deinit(self.gpa); + self.type_extra.deinit(self.gpa); + + self.globals.deinit(self.gpa); + self.next_unique_global_id.deinit(self.gpa); + self.aliases.deinit(self.gpa); + self.objects.deinit(self.gpa); + self.functions.deinit(self.gpa); + + self.* = undefined; +} + +pub fn string(self: *Builder, bytes: []const u8) Allocator.Error!String { + try self.string_bytes.ensureUnusedCapacity(self.gpa, bytes.len + 1); + try self.string_indices.ensureUnusedCapacity(self.gpa, 1); + try self.string_map.ensureUnusedCapacity(self.gpa, 1); + + const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self }); + if (!gop.found_existing) { + self.string_bytes.appendSliceAssumeCapacity(bytes); + self.string_bytes.appendAssumeCapacity(0); + self.string_indices.appendAssumeCapacity(@intCast(self.string_bytes.items.len)); + } + return String.fromIndex(gop.index); +} + +pub fn stringIfExists(self: *const Builder, bytes: []const u8) ?String { + return String.fromIndex( + self.string_map.getIndexAdapted(bytes, String.Adapter{ .builder = self }) orelse return null, + ); +} + +pub fn fmt(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) Allocator.Error!String { + try self.string_map.ensureUnusedCapacity(self.gpa, 1); + try self.string_bytes.ensureUnusedCapacity(self.gpa, std.fmt.count(fmt_str ++ .{0}, fmt_args)); + try self.string_indices.ensureUnusedCapacity(self.gpa, 1); + return self.fmtAssumeCapacity(fmt_str, fmt_args); +} + +pub fn fmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) String { + const start = self.string_bytes.items.len; + 
self.string_bytes.writer(self.gpa).print(fmt_str ++ .{0}, fmt_args) catch unreachable; + const bytes: []const u8 = self.string_bytes.items[start .. self.string_bytes.items.len - 1]; + + const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self }); + if (gop.found_existing) { + self.string_bytes.shrinkRetainingCapacity(start); + } else { + self.string_indices.appendAssumeCapacity(@intCast(self.string_bytes.items.len)); + } + return String.fromIndex(gop.index); +} + +pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type { + try self.types.ensureUnusedCapacity(self.gpa, 1); + try self.ensureUnusedCapacityTypes(1, Type.NamedStructure); + return self.opaqueTypeAssumeCapacity(name); +} + +pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1); + return self.intTypeAssumeCapacity(bits); +} + +pub fn pointerType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, null); + return self.pointerTypeAssumeCapacity(addr_space); +} + +pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { + try self.ensureUnusedCapacityGlobal(name); + return self.addGlobalAssumeCapacity(name, global); +} + +pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Global.Index { + var id = name; + if (id == .none) { + id = self.next_unnamed_global; + self.next_unnamed_global = @enumFromInt(@intFromEnum(self.next_unnamed_global) + 1); + } + while (true) { + const global_gop = self.globals.getOrPutAssumeCapacity(id); + if (!global_gop.found_existing) { + global_gop.value_ptr.* = global; + const index: Global.Index = @enumFromInt(global_gop.index); + index.updateName(self); + return index; + } + + const unique_gop = self.next_unique_global_id.getOrPutAssumeCapacity(name); + if (!unique_gop.found_existing) unique_gop.value_ptr.* = 2; + id = self.fmtAssumeCapacity("{s}.{d}", .{ name.toSlice(self).?, unique_gop.value_ptr.* }); + unique_gop.value_ptr.* += 1; + } +} + +pub fn getGlobal(self: *const Builder, name: String) ?Global.Index { + return @enumFromInt(self.globals.getIndex(name) orelse return null); +} + +fn ensureUnusedCapacityGlobal(self: *Builder, name: String) Allocator.Error!void { + if (self.useLibLlvm()) try self.llvm_globals.ensureUnusedCapacity(self.gpa, 1); + try self.string_map.ensureUnusedCapacity(self.gpa, 1); + try self.string_bytes.ensureUnusedCapacity(self.gpa, name.toSlice(self).?.len + + comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)})); + try self.string_indices.ensureUnusedCapacity(self.gpa, 1); + try self.globals.ensureUnusedCapacity(self.gpa, 1); + try self.next_unique_global_id.ensureUnusedCapacity(self.gpa, 1); +} + +fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.ExtraIndex { + const result: Type.ExtraIndex = @intCast(self.type_extra.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + const value = @field(extra, field.name); + self.type_extra.appendAssumeCapacity(switch (field.type) { + String, Type => @intFromEnum(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }); + } + return result; +} + +fn typeExtraDataTrail( + self: *const Builder, + comptime T: type, + index: Type.ExtraIndex, +) struct { data: T, end: Type.ExtraIndex } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, data| + @field(result, 
field.name) = switch (field.type) { + String, Type => @enumFromInt(data), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + return .{ .data = result, .end = index + @as(Type.ExtraIndex, @intCast(fields.len)) }; +} + +fn typeExtraData(self: *const Builder, comptime T: type, index: Type.ExtraIndex) T { + return self.typeExtraDataTrail(T, index).data; +} + +fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: String) u32 { + return std.hash.uint32(@intFromEnum(key)); + } + pub fn eql(ctx: @This(), lhs: String, _: void, rhs_index: usize) bool { + const rhs_data = ctx.builder.type_data.items[rhs_index]; + return rhs_data.tag == .named_structure and + lhs == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id; + } + }; + const id = if (name == .none) name: { + const next_name = self.next_unnamed_type; + assert(next_name != .none); + self.next_unnamed_type = @enumFromInt(@intFromEnum(next_name) + 1); + break :name next_name; + } else name: { + assert(name.toIndex() != null); + break :name name; + }; + const gop = self.type_map.getOrPutAssumeCapacityAdapted(id, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_data.appendAssumeCapacity(.{ + .tag = .named_structure, + .data = self.addTypeExtraAssumeCapacity(Type.NamedStructure{ .id = id, .child = .none }), + }); + } + const result: Type = @enumFromInt(gop.index); + self.types.putAssumeCapacityNoClobber(id, result); + return result; +} + +fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { + const result = self.typeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); + if (self.useLibLlvm() and result.new) + self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits)); + return result.type; +} + +fn pointerTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type { + const result = self.typeNoExtraAssumeCapacity(.{ .tag = .pointer, .data = @intFromEnum(addr_space) }); + if (self.useLibLlvm() and result.new) + self.llvm_types.appendAssumeCapacity(self.llvm_context.pointerType(@intFromEnum(addr_space))); + return result.type; +} + +fn ensureUnusedCapacityTypes(self: *Builder, count: usize, comptime Extra: ?type) Allocator.Error!void { + try self.type_map.ensureUnusedCapacity(self.gpa, count); + try self.type_data.ensureUnusedCapacity(self.gpa, count); + if (Extra) |E| + try self.type_extra.ensureUnusedCapacity(self.gpa, count * @typeInfo(E).Struct.fields.len); + if (self.useLibLlvm()) try self.llvm_types.ensureUnusedCapacity(self.gpa, count); +} + +fn typeNoExtraAssumeCapacity(self: *Builder, data: Type.Data) struct { new: bool, type: Type } { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Type.Data) u32 { + return std.hash.uint32(@bitCast(key)); + } + pub fn eql(ctx: @This(), lhs: Type.Data, _: void, rhs_index: usize) bool { + const lhs_bits: u32 = @bitCast(lhs); + const rhs_bits: u32 = @bitCast(ctx.builder.type_data.items[rhs_index]); + return lhs_bits == rhs_bits; + } + }; + const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_data.appendAssumeCapacity(data); + } + return .{ .new = !gop.found_existing, .type = @enumFromInt(gop.index) }; +} + +fn isValidIdentifier(id: []const u8) bool { + for (id, 0..) 
|c, i| switch (c) { + '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {}, + '0'...'9' => if (i == 0) return false, + else => return false, + }; + return true; +} + +pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { + if (self.source_filename != .none) try writer.print( + \\; ModuleID = '{s}' + \\source_filename = {"} + \\ + , .{ self.source_filename.toSlice(self).?, self.source_filename.fmt(self) }); + if (self.data_layout != .none) try writer.print( + \\target datalayout = {"} + \\ + , .{self.data_layout.fmt(self)}); + if (self.target_triple != .none) try writer.print( + \\target triple = {"} + \\ + , .{self.target_triple.fmt(self)}); + try writer.writeByte('\n'); + for (self.types.keys(), self.types.values()) |id, ty| try writer.print( + \\%{} = type {+} + \\ + , .{ id.fmt(self), ty.fmt(self) }); + try writer.writeByte('\n'); + for (self.objects.items) |object| { + const global = self.globals.entries.get(@intFromEnum(object.global)); + try writer.print( + \\@{} = {}{}{}{}{}{}{}{}{s} {}{,} + \\ + , .{ + global.key.fmt(self), + global.value.linkage, + global.value.preemption, + global.value.visibility, + global.value.dll_storage_class, + object.thread_local, + global.value.unnamed_addr, + global.value.addr_space, + global.value.externally_initialized, + @tagName(object.mutability), + global.value.type.fmt(self), + global.value.alignment, + }); + } + try writer.writeByte('\n'); + for (self.functions.items) |function| { + const global = self.globals.entries.get(@intFromEnum(function.global)); + try writer.print( + \\{s} {}{}{}{}void @{}() {}{}{{ + \\ ret void + \\}} + \\ + , .{ + if (function.body) |_| "define" else "declare", + global.value.linkage, + global.value.preemption, + global.value.visibility, + global.value.dll_storage_class, + global.key.fmt(self), + global.value.unnamed_addr, + global.value.alignment, + }); + } + try writer.writeByte('\n'); +} + +inline fn useLibLlvm(self: *const Builder) bool { + return build_options.have_llvm and self.use_lib_llvm; +} + +const assert = std.debug.assert; +const build_options = @import("build_options"); +const llvm = @import("bindings.zig"); +const std = @import("std"); + +const Allocator = std.mem.Allocator; +const Builder = @This(); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index b093588e80..687437cb56 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -40,21 +40,42 @@ pub const Context = opaque { pub const halfType = LLVMHalfTypeInContext; extern fn LLVMHalfTypeInContext(C: *Context) *Type; + pub const bfloatType = LLVMBFloatTypeInContext; + extern fn LLVMBFloatTypeInContext(C: *Context) *Type; + pub const floatType = LLVMFloatTypeInContext; extern fn LLVMFloatTypeInContext(C: *Context) *Type; pub const doubleType = LLVMDoubleTypeInContext; extern fn LLVMDoubleTypeInContext(C: *Context) *Type; - pub const x86FP80Type = LLVMX86FP80TypeInContext; - extern fn LLVMX86FP80TypeInContext(C: *Context) *Type; - pub const fp128Type = LLVMFP128TypeInContext; extern fn LLVMFP128TypeInContext(C: *Context) *Type; + pub const x86_fp80Type = LLVMX86FP80TypeInContext; + extern fn LLVMX86FP80TypeInContext(C: *Context) *Type; + + pub const ppc_fp128Type = LLVMPPCFP128TypeInContext; + extern fn LLVMPPCFP128TypeInContext(C: *Context) *Type; + + pub const x86_amxType = LLVMX86AMXTypeInContext; + extern fn LLVMX86AMXTypeInContext(C: *Context) *Type; + + pub const x86_mmxType = LLVMX86MMXTypeInContext; + extern fn LLVMX86MMXTypeInContext(C: *Context) *Type; + pub const voidType = 
LLVMVoidTypeInContext; extern fn LLVMVoidTypeInContext(C: *Context) *Type; + pub const labelType = LLVMLabelTypeInContext; + extern fn LLVMLabelTypeInContext(C: *Context) *Type; + + pub const tokenType = LLVMTokenTypeInContext; + extern fn LLVMTokenTypeInContext(C: *Context) *Type; + + pub const metadataType = LLVMMetadataTypeInContext; + extern fn LLVMMetadataTypeInContext(C: *Context) *Type; + pub const structType = LLVMStructTypeInContext; extern fn LLVMStructTypeInContext( C: *Context, @@ -1071,6 +1092,9 @@ pub const TargetData = opaque { pub const abiSizeOfType = LLVMABISizeOfType; extern fn LLVMABISizeOfType(TD: *TargetData, Ty: *Type) c_ulonglong; + + pub const stringRep = LLVMCopyStringRepOfTargetData; + extern fn LLVMCopyStringRepOfTargetData(TD: *TargetData) [*:0]const u8; }; pub const CodeModel = enum(c_int) { diff --git a/src/link.zig b/src/link.zig index eb6c085663..262718e5af 100644 --- a/src/link.zig +++ b/src/link.zig @@ -110,6 +110,7 @@ pub const Options = struct { /// other objects. /// Otherwise (depending on `use_lld`) this link code directly outputs and updates the final binary. use_llvm: bool, + use_lib_llvm: bool, link_libc: bool, link_libcpp: bool, link_libunwind: bool, diff --git a/src/main.zig b/src/main.zig index c92c69e173..59655eadb6 100644 --- a/src/main.zig +++ b/src/main.zig @@ -439,6 +439,8 @@ const usage_build_generic = \\ -fno-unwind-tables Never produce unwind table entries \\ -fLLVM Force using LLVM as the codegen backend \\ -fno-LLVM Prevent using LLVM as the codegen backend + \\ -flibLLVM Force using LLVM shared library API as the codegen backend + \\ -fno-libLLVM Prevent using LLVM shared library API as the codegen backend \\ -fClang Force using Clang as the C/C++ compilation backend \\ -fno-Clang Prevent using Clang as the C/C++ compilation backend \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error @@ -821,6 +823,7 @@ fn buildOutputType( var stack_size_override: ?u64 = null; var image_base_override: ?u64 = null; var use_llvm: ?bool = null; + var use_lib_llvm: ?bool = null; var use_lld: ?bool = null; var use_clang: ?bool = null; var link_eh_frame_hdr = false; @@ -1261,6 +1264,10 @@ fn buildOutputType( use_llvm = true; } else if (mem.eql(u8, arg, "-fno-LLVM")) { use_llvm = false; + } else if (mem.eql(u8, arg, "-flibLLVM")) { + use_lib_llvm = true; + } else if (mem.eql(u8, arg, "-fno-libLLVM")) { + use_lib_llvm = false; } else if (mem.eql(u8, arg, "-fLLD")) { use_lld = true; } else if (mem.eql(u8, arg, "-fno-LLD")) { @@ -3119,6 +3126,7 @@ fn buildOutputType( .want_tsan = want_tsan, .want_compiler_rt = want_compiler_rt, .use_llvm = use_llvm, + .use_lib_llvm = use_lib_llvm, .use_lld = use_lld, .use_clang = use_clang, .hash_style = hash_style, -- cgit v1.2.3 From 3314fd83af5a6a269926a6f8ad3fcb62b04242c9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 7 Jul 2023 09:22:27 -0400 Subject: llvm: compute data layout without help like a grownup compiler --- lib/std/target.zig | 18 +- lib/test_runner.zig | 2 +- src/codegen/llvm.zig | 485 ++++++++++++++++++++++++++++++++++++------ src/codegen/llvm/Builder.zig | 106 +++++---- src/codegen/llvm/bindings.zig | 7 + 5 files changed, 511 insertions(+), 107 deletions(-) (limited to 'src') diff --git a/lib/std/target.zig b/lib/std/target.zig index 2a96e84001..d40ef11bb7 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1357,8 +1357,6 @@ pub const Target = struct { } }; - pub const stack_align = 16; - pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 { return
std.zig.CrossTarget.fromTarget(self).zigTriple(allocator); } @@ -1833,7 +1831,7 @@ pub const Target = struct { }; } - pub fn ptrBitWidth(target: std.Target) u16 { + pub fn ptrBitWidth(target: Target) u16 { switch (target.abi) { .gnux32, .muslx32, .gnuabin32, .gnuilp32 => return 32, .gnuabi64 => return 64, @@ -1910,6 +1908,18 @@ pub const Target = struct { } } + pub fn stackAlignment(target: Target) u16 { + return switch (target.cpu.arch) { + .x86 => switch (target.os.tag) { + .windows => 4, + else => 16, + }, + .arm, .armeb, .mips, .mipsel => 8, + .aarch64, .aarch64_be, .powerpc64, .powerpc64le, .riscv64, .x86_64, .wasm32, .wasm64 => 16, + else => @divExact(target.ptrBitWidth(), 8), + }; + } + /// Default signedness of `char` for the native C compiler for this target /// Note that char signedness is implementation-defined and many compilers provide /// an option to override the default signedness e.g. GCC's -funsigned-char / -fsigned-char @@ -2428,7 +2438,7 @@ pub const Target = struct { else => {}, }, .avr => switch (c_type) { - .int, .uint, .long, .ulong, .float, .longdouble => return 1, + .char, .int, .uint, .long, .ulong, .float, .longdouble => return 1, .short, .ushort => return 2, .double => return 4, .longlong, .ulonglong => return 8, diff --git a/lib/test_runner.zig b/lib/test_runner.zig index 842babcdeb..42fdf3bec7 100644 --- a/lib/test_runner.zig +++ b/lib/test_runner.zig @@ -136,7 +136,7 @@ fn mainTerminal() void { const have_tty = progress.terminal != null and (progress.supports_ansi_escape_codes or progress.is_windows_terminal); - var async_frame_buffer: []align(std.Target.stack_align) u8 = undefined; + var async_frame_buffer: []align(builtin.target.stackAlignment()) u8 = undefined; // TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly // ignores the alignment of the slice. async_frame_buffer = &[_]u8{}; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0ac4ae7b33..4e5570bd5f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -328,6 +328,212 @@ pub fn supportsTailCall(target: std.Target) bool { } } +const DataLayoutBuilder = struct { + target: std.Target, + + pub fn format( + self: DataLayoutBuilder, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + const is_aarch64_windows = self.target.cpu.arch == .aarch64 and self.target.os.tag == .windows; + try writer.print("{c}-m:{c}", .{ + @as(u8, switch (self.target.cpu.arch.endian()) { + .Little => 'e', + .Big => 'E', + }), + @as(u8, if (self.target.cpu.arch.isMIPS()) + 'm' // Mips mangling: Private symbols get a $ prefix. + else switch (self.target.ofmt) { + .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix. + //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix. + .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix. + // Other symbols get a `_` prefix. + .coff => switch (self.target.os.tag) { + .windows => switch (self.target.cpu.arch) { + .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual prefix. + // Regular C symbols get a `_` prefix. Functions with `__stdcall`, `__fastcall`, + // and `__vectorcall` have custom mangling that appends `@N` where N is the + // number of bytes used to pass parameters. C++ symbols starting with `?` are + // not mangled in any way. + else => 'w', // Windows COFF mangling: Similar to x, except that normal C + // symbols do not receive a `_` prefix. 
+ }, + else => 'e', + }, + //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix. + else => 'e', + }), + }); + var any_non_integral = false; + const ptr_bit_width = self.target.ptrBitWidth(); + var default_info = struct { size: u16, abi: u16, pref: u16, idx: u16 }{ + .size = 64, + .abi = 64, + .pref = 64, + .idx = 64, + }; + const address_space_info = llvmAddressSpaceInfo(self.target); + assert(address_space_info[0].llvm == llvm.address_space.default); + for (address_space_info) |info| { + const is_default = info.llvm == llvm.address_space.default; + if (info.non_integral) { + assert(!is_default); + any_non_integral = true; + } + const size = info.size orelse ptr_bit_width; + const abi = info.abi orelse ptr_bit_width; + const pref = info.pref orelse abi; + const idx = info.idx orelse size; + const matches_default = + size == default_info.size and + abi == default_info.abi and + pref == default_info.pref and + idx == default_info.idx; + if (is_default) default_info = .{ + .size = size, + .abi = abi, + .pref = pref, + .idx = idx, + }; + if (!info.force_in_data_layout and matches_default and + self.target.cpu.arch != .riscv64 and !is_aarch64_windows) continue; + try writer.writeAll("-p"); + if (!is_default) try writer.print("{d}", .{info.llvm}); + try writer.print(":{d}:{d}", .{ size, abi }); + if (pref != abi or idx != size) { + try writer.print(":{d}", .{pref}); + if (idx != size) try writer.print(":{d}", .{idx}); + } + } + if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) + try writer.writeAll("-Fi8"); // for thumb interwork + try self.typeAlignment(.integer, 8, 8, 8, false, writer); + try self.typeAlignment(.integer, 16, 16, 16, false, writer); + try self.typeAlignment(.integer, 32, if (is_aarch64_windows) 0 else 32, 32, false, writer); + try self.typeAlignment(.integer, 64, 32, 64, false, writer); + try self.typeAlignment(.integer, 128, 32, 64, false, writer); + if (backendSupportsF16(self.target)) try self.typeAlignment(.float, 16, 16, 16, false, writer); + try self.typeAlignment(.float, 32, 32, 32, false, writer); + try self.typeAlignment(.float, 64, 64, 64, false, writer); + if (backendSupportsF80(self.target)) try self.typeAlignment(.float, 80, 0, 0, false, writer); + try self.typeAlignment(.float, 128, 128, 128, false, writer); + try self.typeAlignment(.vector, 64, 64, 64, false, writer); + try self.typeAlignment(.vector, 128, 128, 128, false, writer); + if (self.target.os.tag != .windows) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + for (@as([]const u24, switch (self.target.cpu.arch) { + .aarch64_32, + .arm, + .armeb, + .mips, + .mipsel, + .powerpc, + .powerpcle, + .thumb, + .thumbeb, + .riscv32, + => &.{32}, + .aarch64, + .aarch64_be, + .mips64, + .mips64el, + .powerpc64, + .powerpc64le, + .riscv64, + .wasm32, + .wasm64, + => &.{ 32, 64 }, + .x86 => &.{ 8, 16, 32 }, + .x86_64 => &.{ 8, 16, 32, 64 }, + else => &.{}, + }), 0..) 
|natural, index| switch (index) { + 0 => try writer.print("-n{d}", .{natural}), + else => try writer.print(":{d}", .{natural}), + }; + if (self.target.os.tag == .windows) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + const stack_abi = self.target.stackAlignment() * 8; + if (self.target.os.tag == .windows or stack_abi != ptr_bit_width) + try writer.print("-S{d}", .{stack_abi}); + try self.typeAlignment(.vector, 256, 128, 128, true, writer); + try self.typeAlignment(.vector, 512, 128, 128, true, writer); + if (any_non_integral) { + try writer.writeAll("-ni"); + for (address_space_info) |info| if (info.non_integral) + try writer.print(":{d}", .{info.llvm}); + } + } + + fn typeAlignment( + self: DataLayoutBuilder, + kind: enum { integer, vector, float, aggregate }, + size: u24, + default_abi: u24, + default_pref: u24, + force_pref: bool, + writer: anytype, + ) @TypeOf(writer).Error!void { + var abi = default_abi; + var pref = default_pref; + if (kind == .float and size == 80) { + abi = 128; + pref = 128; + } + for (@as([]const std.Target.CType, switch (kind) { + .integer => &.{ .char, .short, .int, .long, .longlong }, + .float => &.{ .float, .double, .longdouble }, + .vector, .aggregate => &.{}, + })) |cty| { + if (self.target.c_type_bit_size(cty) != size) continue; + abi = self.target.c_type_alignment(cty) * 8; + pref = self.target.c_type_preferred_alignment(cty) * 8; + break; + } + switch (kind) { + .integer => { + abi = @min(abi, self.target.maxIntAlignment() * 8); + switch (self.target.os.tag) { + .linux => switch (self.target.cpu.arch) { + .aarch64, .aarch64_be, .mips, .mipsel => pref = @max(pref, 32), + else => {}, + }, + else => {}, + } + switch (self.target.cpu.arch) { + .aarch64, .aarch64_be, .riscv64 => switch (size) { + 128 => { + abi = size; + pref = size; + }, + else => {}, + }, + else => {}, + } + }, + .vector => if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) { + switch (size) { + 128 => abi = 64, + else => {}, + } + } else if (self.target.cpu.arch.isPPC64()) { + abi = size; + pref = size; + }, + .float => {}, + .aggregate => if (self.target.os.tag == .windows or + self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) + { + pref = @min(pref, self.target.ptrBitWidth()); + }, + } + if (abi == default_abi and pref == default_pref) return; + try writer.print("-{c}", .{@tagName(kind)[0]}); + if (size != 0) try writer.print("{d}", .{size}); + try writer.print(":{d}", .{abi}); + if (pref != abi or force_pref) try writer.print(":{d}", .{pref}); + } +}; + /// TODO can this be done with simpler logic / different API binding? 
fn deleteLlvmGlobal(llvm_global: *llvm.Value) void { if (llvm_global.globalGetValueType().getTypeKind() == .Function) { @@ -530,12 +736,17 @@ pub const Object = struct { try builder.init(); errdefer builder.deinit(); builder.source_filename = try builder.string(options.root_name); - builder.data_layout = rep: { + builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }}); + builder.target_triple = try builder.string(llvm_target_triple); + + if (std.debug.runtime_safety) { const rep = target_data.stringRep(); defer llvm.disposeMessage(rep); - break :rep try builder.string(std.mem.span(rep)); - }; - builder.target_triple = try builder.string(llvm_target_triple); + std.testing.expectEqualStrings( + std.mem.span(rep), + builder.data_layout.toSlice(&builder).?, + ) catch unreachable; + } return Object{ .gpa = gpa, @@ -768,6 +979,10 @@ pub const Object = struct { if (comp.verbose_llvm_ir) |path| { if (std.mem.eql(u8, path, "-")) { self.llvm_module.dump(); + + const writer = std.io.getStdErr().writer(); + try writer.writeAll("\n" ++ "-" ** 200 ++ "\n\n"); + try self.builder.dump(writer); } else { const path_z = try comp.gpa.dupeZ(u8, path); defer comp.gpa.free(path_z); @@ -836,12 +1051,6 @@ pub const Object = struct { emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg, }); - { - const writer = std.io.getStdErr().writer(); - try writer.writeAll("\n" ++ "-" ** 200 ++ "\n\n"); - try self.builder.dump(writer); - } - // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. // So we call the entire pipeline multiple times if this is requested. var error_message: [*:0]const u8 = undefined; @@ -1311,7 +1520,7 @@ pub const Object = struct { try global_index.rename(&self.builder, decl_name); const decl_name_slice = decl_name.toSlice(&self.builder).?; const global = global_index.ptr(&self.builder); - global.unnamed_addr = .none; + global.unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); global.linkage = .external; llvm_global.setLinkage(.External); @@ -2674,11 +2883,15 @@ pub const Object = struct { const target = mod.getTarget(); - const llvm_type = try o.lowerLlvmType(decl.ty); + const ty = try o.lowerType(decl.ty); + const llvm_type = if (ty != .none) + o.builder.llvm_types.items[@intFromEnum(ty)] + else + try o.lowerLlvmType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); var global = Builder.Global{ - .type = .void, + .type = if (ty != .none) ty else .void, .kind = .{ .object = @enumFromInt(o.builder.objects.items.len) }, }; var object = Builder.Object{ @@ -2698,7 +2911,7 @@ pub const Object = struct { // This is needed for declarations created by `@extern`. 
if (is_extern) { - global.unnamed_addr = .none; + global.unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); global.linkage = .external; llvm_global.setLinkage(.External); @@ -2708,7 +2921,7 @@ pub const Object = struct { object.thread_local = .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { - object.thread_local = .none; + object.thread_local = .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } if (variable.is_weak_linkage) { @@ -2962,9 +3175,9 @@ pub const Object = struct { const name = try o.builder.string(mod.intern_pool.stringToSlice( try struct_obj.getFullyQualifiedName(mod), )); - _ = try o.builder.opaqueType(name); + const ty = try o.builder.opaqueType(name); - const llvm_struct_ty = o.context.structCreateNamed(name.toSlice(&o.builder).?); + const llvm_struct_ty = o.builder.llvm_types.items[@intFromEnum(ty)]; gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls assert(struct_obj.haveFieldTypes()); @@ -3101,16 +3314,6 @@ pub const Object = struct { } } - fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { - const mod = o.module; - switch (t.toIntern()) { - .void_type, .noreturn_type => return .void, - else => switch (mod.intern_pool.indexToKey(t.toIntern())) { - else => return .none, - }, - } - } - fn lowerLlvmTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { const mod = o.module; const ip = &mod.intern_pool; @@ -3206,6 +3409,149 @@ pub const Object = struct { return llvm_elem_ty; } + fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { + const mod = o.module; + const target = mod.getTarget(); + return switch (t.toIntern()) { + .u0_type, .i0_type => unreachable, + inline .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + => |tag| @field(Builder.Type, "i" ++ @tagName(tag)[1 .. @tagName(tag).len - "_type".len]), + .usize_type, .isize_type => try o.builder.intType(target.ptrBitWidth()), + inline .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => |tag| try o.builder.intType(target.c_type_bit_size( + @field(std.Target.CType, @tagName(tag)["c_".len .. 
@tagName(tag).len - "_type".len]), + )), + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + => switch (t.floatBits(target)) { + 16 => if (backendSupportsF16(target)) .half else .i16, + 32 => .float, + 64 => .double, + 80 => if (backendSupportsF80(target)) .x86_fp80 else .i80, + 128 => .fp128, + else => unreachable, + }, + .anyopaque_type => unreachable, + .bool_type => .i1, + .void_type => .void, + .type_type => unreachable, + .anyerror_type => .i16, + .comptime_int_type, .comptime_float_type, .noreturn_type => unreachable, + .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), + .null_type, + .undefined_type, + .enum_literal_type, + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + => unreachable, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + => .ptr, + .slice_const_u8_type, .slice_const_u8_sentinel_0_type => .none, + .optional_noreturn_type => unreachable, + .anyerror_void_error_union_type => .i16, + .generic_poison_type, .empty_struct_type => unreachable, + // values, not types + .undef, + .zero, + .zero_usize, + .zero_u8, + .one, + .one_usize, + .one_u8, + .four_u8, + .negative_one, + .calling_convention_c, + .calling_convention_inline, + .void_value, + .unreachable_value, + .null_value, + .bool_true, + .bool_false, + .empty_struct, + .generic_poison, + .var_args_param_type, + .none, + => unreachable, + else => switch (mod.intern_pool.indexToKey(t.toIntern())) { + .int_type => |int_type| try o.builder.intType(int_type.bits), + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One, .Many, .C => try o.builder.pointerType(@enumFromInt( + toLlvmAddressSpace(ptr_type.flags.address_space, target), + )), + .Slice => .none, + }, + .array_type, .vector_type, .opt_type => .none, + .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), + .error_union_type => .none, + .simple_type => unreachable, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + => .none, + .enum_type => |enum_type| try o.lowerType(enum_type.tag_ty.toType()), + .func_type, .error_set_type, .inferred_error_set_type => .none, + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; + } + fn lowerValue(o: *Object, arg_tv: TypedValue) Error!*llvm.Value { const mod = o.module; const gpa = o.gpa; @@ -10606,44 +10952,63 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca /// Convert a zig-address space to an llvm address space. 
fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) c_uint { + for (llvmAddressSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm; + unreachable; +} + +const AddressSpaceInfo = struct { + zig: ?std.builtin.AddressSpace, + llvm: c_uint, + non_integral: bool = false, + size: ?u16 = null, + abi: ?u16 = null, + pref: ?u16 = null, + idx: ?u16 = null, + force_in_data_layout: bool = false, +}; +fn llvmAddressSpaceInfo(target: std.Target) []const AddressSpaceInfo { return switch (target.cpu.arch) { - .x86, .x86_64 => switch (address_space) { - .generic => llvm.address_space.default, - .gs => llvm.address_space.x86.gs, - .fs => llvm.address_space.x86.fs, - .ss => llvm.address_space.x86.ss, - else => unreachable, + .x86, .x86_64 => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.default }, + .{ .zig = .gs, .llvm = llvm.address_space.x86.gs }, + .{ .zig = .fs, .llvm = llvm.address_space.x86.fs }, + .{ .zig = .ss, .llvm = llvm.address_space.x86.ss }, + .{ .zig = null, .llvm = llvm.address_space.x86.ptr32_sptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = llvm.address_space.x86.ptr32_uptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = llvm.address_space.x86.ptr64, .size = 64, .abi = 64, .force_in_data_layout = true }, }, - .nvptx, .nvptx64 => switch (address_space) { - .generic => llvm.address_space.default, - .global => llvm.address_space.nvptx.global, - .constant => llvm.address_space.nvptx.constant, - .param => llvm.address_space.nvptx.param, - .shared => llvm.address_space.nvptx.shared, - .local => llvm.address_space.nvptx.local, - else => unreachable, + .nvptx, .nvptx64 => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.default }, + .{ .zig = .global, .llvm = llvm.address_space.nvptx.global }, + .{ .zig = .constant, .llvm = llvm.address_space.nvptx.constant }, + .{ .zig = .param, .llvm = llvm.address_space.nvptx.param }, + .{ .zig = .shared, .llvm = llvm.address_space.nvptx.shared }, + .{ .zig = .local, .llvm = llvm.address_space.nvptx.local }, }, - .amdgcn => switch (address_space) { - .generic => llvm.address_space.amdgpu.flat, - .global => llvm.address_space.amdgpu.global, - .constant => llvm.address_space.amdgpu.constant, - .shared => llvm.address_space.amdgpu.local, - .local => llvm.address_space.amdgpu.private, - else => unreachable, + .amdgcn => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.amdgpu.flat }, + .{ .zig = .global, .llvm = llvm.address_space.amdgpu.global }, + .{ .zig = .constant, .llvm = llvm.address_space.amdgpu.constant }, + .{ .zig = .shared, .llvm = llvm.address_space.amdgpu.local }, + .{ .zig = .local, .llvm = llvm.address_space.amdgpu.private }, }, - .avr => switch (address_space) { - .generic => llvm.address_space.default, - .flash => llvm.address_space.avr.flash, - .flash1 => llvm.address_space.avr.flash1, - .flash2 => llvm.address_space.avr.flash2, - .flash3 => llvm.address_space.avr.flash3, - .flash4 => llvm.address_space.avr.flash4, - .flash5 => llvm.address_space.avr.flash5, - else => unreachable, + .avr => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.default }, + .{ .zig = .flash, .llvm = llvm.address_space.avr.flash }, + .{ .zig = .flash1, .llvm = llvm.address_space.avr.flash1 }, + .{ .zig = .flash2, .llvm = llvm.address_space.avr.flash2 }, + .{ .zig = .flash3, .llvm = llvm.address_space.avr.flash3 }, + .{ .zig = .flash4, .llvm = llvm.address_space.avr.flash4 }, + .{ .zig = .flash5, .llvm = 
llvm.address_space.avr.flash5 }, }, - else => switch (address_space) { - .generic => llvm.address_space.default, - else => unreachable, + .wasm32, .wasm64 => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.default }, + .{ .zig = null, .llvm = llvm.address_space.wasm.variable, .non_integral = true }, + .{ .zig = null, .llvm = llvm.address_space.wasm.externref, .non_integral = true, .size = 8, .abi = 8 }, + .{ .zig = null, .llvm = llvm.address_space.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 }, + }, + else => &.{ + .{ .zig = .generic, .llvm = llvm.address_space.default }, }, }; } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 2aff03a042..04caf2d412 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -17,6 +17,7 @@ string_indices: std.ArrayListUnmanaged(u32) = .{}, types: std.AutoArrayHashMapUnmanaged(String, Type) = .{}, next_unnamed_type: String = @enumFromInt(0), +next_unique_type_id: std.AutoHashMapUnmanaged(String, u32) = .{}, type_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, type_data: std.ArrayListUnmanaged(Type.Data) = .{}, type_extra: std.ArrayListUnmanaged(u32) = .{}, @@ -109,8 +110,10 @@ pub const Type = enum(u32) { i1, i8, i16, + i29, i32, i64, + i80, i128, ptr, @@ -173,17 +176,18 @@ pub const Type = enum(u32) { if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name); const type_data = data.builder.type_data.items[@intFromEnum(data.type)]; switch (type_data.tag) { + .integer => try writer.print("i{d}", .{type_data.data}), .named_structure => { const extra = data.builder.typeExtraData(NamedStructure, type_data.data); - if (comptime std.mem.eql(u8, fmt_str, "")) try writer.print("%{}", .{ - extra.id.fmt(data.builder), - }) else if (comptime std.mem.eql(u8, fmt_str, "+")) switch (extra.child) { + if (comptime std.mem.eql(u8, fmt_str, "")) switch (extra.child) { .none => try writer.writeAll("opaque"), else => try format(.{ .type = extra.child, .builder = data.builder, }, fmt_str, fmt_opts, writer), - } else @compileError("invalid format string: '" ++ fmt_str ++ "'"); + } else if (comptime std.mem.eql(u8, fmt_str, "%")) try writer.print("%{}", .{ + extra.id.fmt(data.builder), + }) else @compileError("invalid format string: '" ++ fmt_str ++ "'"); }, else => try writer.print("", .{@intFromEnum(data.type)}), } @@ -220,7 +224,7 @@ pub const Linkage = enum { }; pub const Preemption = enum { - none, + default, dso_preemptable, dso_local, @@ -230,7 +234,7 @@ pub const Preemption = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .none) return; + if (self == .default) return; try writer.writeAll(@tagName(self)); try writer.writeByte(' '); } @@ -271,7 +275,7 @@ pub const DllStorageClass = enum { }; pub const ThreadLocal = enum { - none, + default, generaldynamic, localdynamic, initialexec, @@ -283,7 +287,7 @@ pub const ThreadLocal = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .none) return; + if (self == .default) return; try writer.writeAll("thread_local"); if (self != .generaldynamic) { try writer.writeByte('('); @@ -295,7 +299,7 @@ pub const ThreadLocal = enum { }; pub const UnnamedAddr = enum { - none, + default, unnamed_addr, local_unnamed_addr, @@ -305,14 +309,14 @@ pub const UnnamedAddr = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .none) return; + if (self == .default) return; try writer.writeAll(@tagName(self)); try writer.writeByte(' 
'); } }; pub const AddrSpace = enum(u24) { - none, + default, _, pub fn format( @@ -321,13 +325,13 @@ pub const AddrSpace = enum(u24) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .none) return; + if (self == .default) return; try writer.print("addrspace({d}) ", .{@intFromEnum(self)}); } }; pub const ExternallyInitialized = enum { - none, + default, externally_initialized, pub fn format( @@ -336,7 +340,7 @@ pub const ExternallyInitialized = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .none) return; + if (self == .default) return; try writer.writeAll(@tagName(self)); try writer.writeByte(' '); } @@ -369,12 +373,12 @@ pub const Alignment = enum(u6) { pub const Global = struct { linkage: Linkage = .default, - preemption: Preemption = .none, + preemption: Preemption = .default, visibility: Visibility = .default, dll_storage_class: DllStorageClass = .default, - unnamed_addr: UnnamedAddr = .none, - addr_space: AddrSpace = .none, - externally_initialized: ExternallyInitialized = .none, + unnamed_addr: UnnamedAddr = .default, + addr_space: AddrSpace = .default, + externally_initialized: ExternallyInitialized = .default, type: Type, alignment: Alignment = .default, kind: union(enum) { @@ -459,7 +463,7 @@ pub const Alias = struct { pub const Object = struct { global: Global.Index, - thread_local: ThreadLocal = .none, + thread_local: ThreadLocal = .default, mutability: enum { global, constant } = .global, init: void = {}, @@ -525,7 +529,7 @@ pub fn init(self: *Builder) Allocator.Error!void { @field(llvm.Context, simple_field.name ++ "Type")(self.llvm_context), ); } - inline for (.{ 1, 8, 16, 32, 64, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == + inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); inline for (.{0}) |addr_space| assert(self.pointerTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); @@ -541,6 +545,7 @@ pub fn deinit(self: *Builder) void { self.string_indices.deinit(self.gpa); self.types.deinit(self.gpa); + self.next_unique_type_id.deinit(self.gpa); self.type_map.deinit(self.gpa); self.type_data.deinit(self.gpa); self.type_extra.deinit(self.gpa); @@ -596,13 +601,18 @@ pub fn fmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: } pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type { + try self.string_map.ensureUnusedCapacity(self.gpa, 1); + try self.string_bytes.ensureUnusedCapacity(self.gpa, name.toSlice(self).?.len + + comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)})); + try self.string_indices.ensureUnusedCapacity(self.gpa, 1); try self.types.ensureUnusedCapacity(self.gpa, 1); + try self.next_unique_type_id.ensureUnusedCapacity(self.gpa, 1); try self.ensureUnusedCapacityTypes(1, Type.NamedStructure); return self.opaqueTypeAssumeCapacity(name); } pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1); + try self.ensureUnusedCapacityTypes(1, null); return self.intTypeAssumeCapacity(bits); } @@ -695,27 +705,39 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { lhs == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id; } }; - const id = if (name == .none) name: { - const next_name = self.next_unnamed_type; - assert(next_name != .none); - self.next_unnamed_type = @enumFromInt(@intFromEnum(next_name) + 1); - break :name next_name; - } else name: { - 
assert(name.toIndex() != null); - break :name name; - }; - const gop = self.type_map.getOrPutAssumeCapacityAdapted(id, Adapter{ .builder = self }); - if (!gop.found_existing) { - gop.key_ptr.* = {}; - gop.value_ptr.* = {}; - self.type_data.appendAssumeCapacity(.{ - .tag = .named_structure, - .data = self.addTypeExtraAssumeCapacity(Type.NamedStructure{ .id = id, .child = .none }), - }); + var id = name; + if (name == .none) { + id = self.next_unnamed_type; + assert(id != .none); + self.next_unnamed_type = @enumFromInt(@intFromEnum(id) + 1); + } else assert(name.toIndex() != null); + while (true) { + const type_gop = self.types.getOrPutAssumeCapacity(id); + if (!type_gop.found_existing) { + const gop = self.type_map.getOrPutAssumeCapacityAdapted(id, Adapter{ .builder = self }); + assert(!gop.found_existing); + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_data.appendAssumeCapacity(.{ + .tag = .named_structure, + .data = self.addTypeExtraAssumeCapacity(Type.NamedStructure{ + .id = id, + .child = .none, + }), + }); + const result: Type = @enumFromInt(gop.index); + type_gop.value_ptr.* = result; + if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + self.llvm_context.structCreateNamed(id.toSlice(self) orelse ""), + ); + return result; + } + + const unique_gop = self.next_unique_type_id.getOrPutAssumeCapacity(name); + if (!unique_gop.found_existing) unique_gop.value_ptr.* = 2; + id = self.fmtAssumeCapacity("{s}.{d}", .{ name.toSlice(self).?, unique_gop.value_ptr.* }); + unique_gop.value_ptr.* += 1; } - const result: Type = @enumFromInt(gop.index); - self.types.putAssumeCapacityNoClobber(id, result); - return result; } fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { @@ -786,7 +808,7 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { , .{self.target_triple.fmt(self)}); try writer.writeByte('\n'); for (self.types.keys(), self.types.values()) |id, ty| try writer.print( - \\%{} = type {+} + \\%{} = type {} \\ , .{ id.fmt(self), ty.fmt(self) }); try writer.writeByte('\n'); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 687437cb56..c89f9ee2d5 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -1612,6 +1612,13 @@ pub const address_space = struct { pub const constant_buffer_14: c_uint = 22; pub const constant_buffer_15: c_uint = 23; }; + + // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h + pub const wasm = struct { + pub const variable = 1; + pub const externref = 10; + pub const funcref = 20; + }; }; pub const DIEnumerator = opaque {}; -- cgit v1.2.3 From d167bd4b568e2d5808fc37a8c683e4c06317a434 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 8 Jul 2023 04:38:31 -0400 Subject: llvm: finish converting `lowerType` --- src/codegen/llvm.zig | 1580 +++++++++++++++++++---------------------- src/codegen/llvm/Builder.zig | 669 ++++++++++++++--- src/codegen/llvm/bindings.zig | 9 +- 3 files changed, 1308 insertions(+), 950 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4e5570bd5f..c74f6021db 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -374,12 +374,11 @@ const DataLayoutBuilder = struct { .pref = 64, .idx = 64, }; - const address_space_info = llvmAddressSpaceInfo(self.target); - assert(address_space_info[0].llvm == llvm.address_space.default); - for (address_space_info) |info| { - const is_default = info.llvm == llvm.address_space.default; + const addr_space_info = llvmAddrSpaceInfo(self.target); + for 
(addr_space_info, 0..) |info, i| { + assert((info.llvm == .default) == (i == 0)); if (info.non_integral) { - assert(!is_default); + assert(info.llvm != .default); any_non_integral = true; } const size = info.size orelse ptr_bit_width; @@ -391,7 +390,7 @@ const DataLayoutBuilder = struct { abi == default_info.abi and pref == default_info.pref and idx == default_info.idx; - if (is_default) default_info = .{ + if (info.llvm == .default) default_info = .{ .size = size, .abi = abi, .pref = pref, @@ -400,7 +399,7 @@ const DataLayoutBuilder = struct { if (!info.force_in_data_layout and matches_default and self.target.cpu.arch != .riscv64 and !is_aarch64_windows) continue; try writer.writeAll("-p"); - if (!is_default) try writer.print("{d}", .{info.llvm}); + if (info.llvm != .default) try writer.print("{d}", .{@intFromEnum(info.llvm)}); try writer.print(":{d}:{d}", .{ size, abi }); if (pref != abi or idx != size) { try writer.print(":{d}", .{pref}); @@ -459,8 +458,8 @@ const DataLayoutBuilder = struct { try self.typeAlignment(.vector, 512, 128, 128, true, writer); if (any_non_integral) { try writer.writeAll("-ni"); - for (address_space_info) |info| if (info.non_integral) - try writer.print(":{d}", .{info.llvm}); + for (addr_space_info) |info| if (info.non_integral) + try writer.print(":{d}", .{@intFromEnum(info.llvm)}); } } @@ -589,7 +588,7 @@ pub const Object = struct { /// Memoizes a null `?usize` value. null_opt_addr: ?*llvm.Value, - pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type); + pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, Builder.Type); /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we /// want to iterate over it while adding entries to it. @@ -809,15 +808,10 @@ pub const Object = struct { const error_name_table_ptr_global = o.error_name_table orelse return; const mod = o.module; - const target = mod.getTarget(); - const llvm_ptr_ty = o.context.pointerType(0); // TODO: Address space - const llvm_usize_ty = o.context.intType(target.ptrBitWidth()); - const type_fields = [_]*llvm.Type{ - llvm_ptr_ty, - llvm_usize_ty, - }; - const llvm_slice_ty = o.context.structType(&type_fields, type_fields.len, .False); + // TODO: Address space + const llvm_usize_ty = try o.lowerType(Type.usize); + const llvm_slice_ty = (try o.builder.structType(.normal, &.{ .ptr, llvm_usize_ty })).toLlvm(&o.builder); const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); @@ -838,7 +832,7 @@ pub const Object = struct { const slice_fields = [_]*llvm.Value{ str_global, - llvm_usize_ty.constInt(name.len, .False), + llvm_usize_ty.toLlvm(&o.builder).constInt(name.len, .False), }; llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); } @@ -1204,7 +1198,7 @@ pub const Object = struct { { var llvm_arg_i = @as(c_uint, @intFromBool(ret_ptr != null)) + @intFromBool(err_return_tracing); var it = iterateParamTypes(o, fn_info); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { assert(!it.byval_attr); @@ -1229,7 +1223,7 @@ pub const Object = struct { }, .byref => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = try o.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1241,14 +1235,14 @@ pub const Object = struct { if (isByRef(param_ty, 
mod)) { args.appendAssumeCapacity(param); } else { - const load_inst = builder.buildLoad(param_llvm_ty, param, ""); + const load_inst = builder.buildLoad(param_llvm_ty.toLlvm(&o.builder), param, ""); load_inst.setAlignment(alignment); args.appendAssumeCapacity(load_inst); } }, .byref_mut => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1271,7 +1265,7 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); const int_llvm_ty = o.context.intType(abi_size * 8); const alignment = @max( @@ -1316,16 +1310,16 @@ pub const Object = struct { const len_param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const slice_llvm_ty = try o.lowerLlvmType(param_ty); + const slice_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const partial = builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr_param, 0, ""); const aggregate = builder.buildInsertValue(partial, len_param, 1, ""); try args.append(aggregate); }, .multiple_llvm_types => { assert(!it.byval_attr); - const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; + const field_types = it.llvm_types_buffer[0..it.types_len]; const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); @@ -1356,7 +1350,7 @@ pub const Object = struct { }, .float_array => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1374,7 +1368,7 @@ pub const Object = struct { }, .i32_array, .i64_array => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -2678,7 +2672,7 @@ pub const Object = struct { const global = o.llvm_module.addGlobalInAddressSpace( llvm_init.typeOf(), "", - llvm_actual_addrspace, + @intFromEnum(llvm_actual_addrspace), ); global.setLinkage(.Internal); global.setUnnamedAddr(.True); @@ -2686,7 +2680,7 @@ pub const Object = struct { global.setInitializer(llvm_init); const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace) - global.constAddrSpaceCast(o.context.pointerType(llvm_wanted_addrspace)) + global.constAddrSpaceCast(o.context.pointerType(@intFromEnum(llvm_wanted_addrspace))) else global; @@ -2710,16 +2704,16 @@ pub const Object = struct { const target = mod.getTarget(); const sret = firstParamSRet(fn_info, mod); - const fn_type = try o.lowerLlvmType(zig_fn_type); + const fn_type = try o.lowerType(zig_fn_type); const 
ip = &mod.intern_pool; const fqn = try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod))); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.toSlice(&o.builder).?, fn_type, llvm_addrspace); + const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder), @intFromEnum(llvm_addrspace)); var global = Builder.Global{ - .type = .void, + .type = fn_type, .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; var function = Builder.Function{ @@ -2745,7 +2739,7 @@ pub const Object = struct { o.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 o.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try o.lowerLlvmType(fn_info.return_type.toType()); + const raw_llvm_ret_ty = (try o.lowerType(fn_info.return_type.toType())).toLlvm(&o.builder); llvm_fn.addSretAttr(raw_llvm_ret_ty); } @@ -2789,7 +2783,7 @@ pub const Object = struct { var it = iterateParamTypes(o, fn_info); it.llvm_index += @intFromBool(sret); it.llvm_index += @intFromBool(err_return_tracing); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); @@ -2799,7 +2793,7 @@ pub const Object = struct { }, .byref => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1]; - const param_llvm_ty = try o.lowerLlvmType(param_ty.toType()); + const param_llvm_ty = try o.lowerType(param_ty.toType()); const alignment = param_ty.toType().abiAlignment(mod); o.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, @@ -2883,15 +2877,9 @@ pub const Object = struct { const target = mod.getTarget(); - const ty = try o.lowerType(decl.ty); - const llvm_type = if (ty != .none) - o.builder.llvm_types.items[@intFromEnum(ty)] - else - try o.lowerLlvmType(decl.ty); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - var global = Builder.Global{ - .type = if (ty != .none) ty else .void, + .addr_space = toLlvmGlobalAddressSpace(decl.@"addrspace", target), + .type = try o.lowerType(decl.ty), .kind = .{ .object = @enumFromInt(o.builder.objects.items.len) }, }; var object = Builder.Object{ @@ -2904,9 +2892,9 @@ pub const Object = struct { else fqn; const llvm_global = o.llvm_module.addGlobalInAddressSpace( - llvm_type, + global.type.toLlvm(&o.builder), fqn.toSlice(&o.builder).?, - llvm_actual_addrspace, + @intFromEnum(global.addr_space), ); // This is needed for declarations created by `@extern`. @@ -2943,22 +2931,19 @@ pub const Object = struct { } fn isUnnamedType(o: *Object, ty: Type, val: *llvm.Value) bool { - // Once `lowerLlvmType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `lowerLlvmType` fails here it means + // Once `lowerType` succeeds, successive calls to it with the same Zig type + // are guaranteed to succeed. So if a call to `lowerType` fails here it means // it is the first time lowering the type, which means the value can't possible // have that type. 
- const llvm_ty = o.lowerLlvmType(ty) catch return true; + const llvm_ty = (o.lowerType(ty) catch return true).toLlvm(&o.builder); return val.typeOf() != llvm_ty; } - fn lowerLlvmType(o: *Object, t: Type) Allocator.Error!*llvm.Type { - const ty = try o.lowerType(t); - const llvm_ty = if (ty != .none) - o.builder.llvm_types.items[@intFromEnum(ty)] - else - try o.lowerLlvmTypeInner(t); + fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { + const ty = try o.lowerTypeInner(t); const mod = o.module; if (std.debug.runtime_safety and false) check: { + const llvm_ty = ty.toLlvm(&o.builder); if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; if (!llvm_ty.isSized().toBool()) break :check; @@ -2971,445 +2956,10 @@ pub const Object = struct { }); } } - return llvm_ty; - } - - fn lowerLlvmTypeInner(o: *Object, t: Type) Allocator.Error!*llvm.Type { - const gpa = o.gpa; - const mod = o.module; - const target = mod.getTarget(); - switch (t.zigTypeTag(mod)) { - .Void, .NoReturn => return o.context.voidType(), - .Int => { - const info = t.intInfo(mod); - assert(info.bits != 0); - return o.context.intType(info.bits); - }, - .Enum => { - const int_ty = t.intTagType(mod); - const bit_count = int_ty.intInfo(mod).bits; - assert(bit_count != 0); - return o.context.intType(bit_count); - }, - .Float => switch (t.floatBits(target)) { - 16 => return if (backendSupportsF16(target)) o.context.halfType() else o.context.intType(16), - 32 => return o.context.floatType(), - 64 => return o.context.doubleType(), - 80 => return if (backendSupportsF80(target)) o.context.x86_fp80Type() else o.context.intType(80), - 128 => return o.context.fp128Type(), - else => unreachable, - }, - .Bool => return o.context.intType(1), - .Pointer => { - if (t.isSlice(mod)) { - const ptr_type = t.slicePtrFieldType(mod); - - const fields: [2]*llvm.Type = .{ - try o.lowerLlvmType(ptr_type), - try o.lowerLlvmType(Type.usize), - }; - return o.context.structType(&fields, fields.len, .False); - } - const ptr_info = t.ptrInfo(mod); - const llvm_addrspace = toLlvmAddressSpace(ptr_info.flags.address_space, target); - return o.context.pointerType(llvm_addrspace); - }, - .Opaque => { - if (t.toIntern() == .anyopaque_type) return o.context.intType(8); - - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; - const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type)); - - const llvm_struct_ty = o.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .Array => { - const elem_ty = t.childType(mod); - if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); - const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null); - return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len))); - }, - .Vector => { - const elem_type = try o.lowerLlvmType(t.childType(mod)); - return elem_type.vectorType(t.vectorLen(mod)); - }, - .Optional => { - const child_ty = t.optionalChild(mod); - if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return o.context.intType(8); - } - const payload_llvm_ty = try o.lowerLlvmType(child_ty); - if (t.optionalReprIsPayload(mod)) { - return payload_llvm_ty; - } - - comptime assert(optional_layout_version == 3); - var fields_buf: [3]*llvm.Type = .{ - 
payload_llvm_ty, o.context.intType(8), undefined, - }; - const offset = child_ty.abiSize(mod) + 1; - const abi_size = t.abiSize(mod); - const padding = @as(c_uint, @intCast(abi_size - offset)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); - } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - }, - .ErrorUnion => { - const payload_ty = t.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return try o.lowerLlvmType(Type.anyerror); - } - const llvm_error_type = try o.lowerLlvmType(Type.anyerror); - const llvm_payload_type = try o.lowerLlvmType(payload_ty); - - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - - const payload_size = payload_ty.abiSize(mod); - const error_size = Type.anyerror.abiSize(mod); - - var fields_buf: [3]*llvm.Type = undefined; - if (error_align > payload_align) { - fields_buf[0] = llvm_error_type; - fields_buf[1] = llvm_payload_type; - const payload_end = - std.mem.alignForward(u64, error_size, payload_align) + - payload_size; - const abi_size = std.mem.alignForward(u64, payload_end, error_align); - const padding = @as(c_uint, @intCast(abi_size - payload_end)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); - } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - } else { - fields_buf[0] = llvm_payload_type; - fields_buf[1] = llvm_error_type; - const error_end = - std.mem.alignForward(u64, payload_size, error_align) + - error_size; - const abi_size = std.mem.alignForward(u64, error_end, payload_align); - const padding = @as(c_uint, @intCast(abi_size - error_end)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); - } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - } - }, - .ErrorSet => return o.context.intType(16), - .Struct => { - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { - .anon_struct_type => |tuple| { - const llvm_struct_ty = o.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); - - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - - for (tuple.types, tuple.values) |field_ty, field_val| { - if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try o.lowerLlvmType(field_ty.toType()); - try llvm_field_types.append(gpa, field_llvm_ty); - - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { 
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - } - - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @as(c_uint, @intCast(llvm_field_types.items.len)), - .False, - ); - - return llvm_struct_ty; - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; - - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const int_llvm_ty = try o.lowerLlvmType(struct_obj.backing_int_ty); - gop.value_ptr.* = int_llvm_ty; - return int_llvm_ty; - } - - const name = try o.builder.string(mod.intern_pool.stringToSlice( - try struct_obj.getFullyQualifiedName(mod), - )); - const ty = try o.builder.opaqueType(name); - - const llvm_struct_ty = o.builder.llvm_types.items[@intFromEnum(ty)]; - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - - assert(struct_obj.haveFieldTypes()); - - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); - - try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count()); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 1; - var any_underaligned_fields = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(mod); - any_underaligned_fields = any_underaligned_fields or - field_align < field_ty_align; - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try o.lowerLlvmType(field.ty); - try llvm_field_types.append(gpa, field_llvm_ty); - - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - } - - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @as(c_uint, @intCast(llvm_field_types.items.len)), - llvm.Bool.fromBool(any_underaligned_fields), - ); - - return llvm_struct_ty; - }, - .Union => { - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const layout = t.unionGetLayout(mod); - const union_obj = mod.typeToUnion(t).?; - - if (union_obj.layout == .Packed) { - const bitsize = @as(c_uint, @intCast(t.bitSize(mod))); - const int_llvm_ty = o.context.intType(bitsize); - gop.value_ptr.* = int_llvm_ty; - return int_llvm_ty; - } - - if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); - gop.value_ptr.* = enum_tag_llvm_ty; - return enum_tag_llvm_ty; - } - - const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod)); - - const llvm_union_ty = o.context.structCreateNamed(name); - gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls - - const aligned_field = 
union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try o.lowerLlvmType(aligned_field.ty); - - const llvm_payload_ty = t: { - if (layout.most_aligned_field_size == layout.payload_size) { - break :t llvm_aligned_field_ty; - } - const padding_len = if (layout.tag_size == 0) - @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size)) - else - @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size)); - const fields: [2]*llvm.Type = .{ - llvm_aligned_field_ty, - o.context.intType(8).arrayType(padding_len), - }; - break :t o.context.structType(&fields, fields.len, .True); - }; - - if (layout.tag_size == 0) { - var llvm_fields: [1]*llvm.Type = .{llvm_payload_ty}; - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); - return llvm_union_ty; - } - const enum_tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); - - // Put the tag before or after the payload depending on which one's - // alignment is greater. - var llvm_fields: [3]*llvm.Type = undefined; - var llvm_fields_len: c_uint = 2; - - if (layout.tag_align >= layout.payload_align) { - llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; - } else { - llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; - } - - // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. - if (layout.padding != 0) { - llvm_fields[2] = o.context.intType(8).arrayType(layout.padding); - llvm_fields_len = 3; - } - - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); - return llvm_union_ty; - }, - .Fn => return lowerLlvmTypeFn(o, t), - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .EnumLiteral => unreachable, - - .Frame => @panic("TODO implement llvmType for Frame types"), - .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"), - } - } - - fn lowerLlvmTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { - const mod = o.module; - const ip = &mod.intern_pool; - const fn_info = mod.typeToFunc(fn_ty).?; - const llvm_ret_ty = try lowerFnRetTy(o, fn_info); - - var llvm_params = std.ArrayList(*llvm.Type).init(o.gpa); - defer llvm_params.deinit(); - - if (firstParamSRet(fn_info, mod)) { - try llvm_params.append(o.context.pointerType(0)); - } - - if (fn_info.return_type.toType().isError(mod) and - mod.comp.bin_file.options.error_return_tracing) - { - const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); - try llvm_params.append(try o.lowerLlvmType(ptr_ty)); - } - - var it = iterateParamTypes(o, fn_info); - while (it.next()) |lowering| switch (lowering) { - .no_bits => continue, - .byval => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - try llvm_params.append(try o.lowerLlvmType(param_ty)); - }, - .byref, .byref_mut => { - try llvm_params.append(o.context.pointerType(0)); - }, - .abi_sized_int => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - try llvm_params.append(o.context.intType(abi_size * 8)); - }, - .slice => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(mod) - else - param_ty.slicePtrFieldType(mod); - const ptr_llvm_ty = try o.lowerLlvmType(ptr_ty); - const len_llvm_ty = try o.lowerLlvmType(Type.usize); - - try 
llvm_params.ensureUnusedCapacity(2); - llvm_params.appendAssumeCapacity(ptr_llvm_ty); - llvm_params.appendAssumeCapacity(len_llvm_ty); - }, - .multiple_llvm_types => { - try llvm_params.appendSlice(it.llvm_types_buffer[0..it.llvm_types_len]); - }, - .as_u16 => { - try llvm_params.append(o.context.intType(16)); - }, - .float_array => |count| { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const float_ty = try o.lowerLlvmType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); - const field_count = @as(c_uint, @intCast(count)); - const arr_ty = float_ty.arrayType(field_count); - try llvm_params.append(arr_ty); - }, - .i32_array, .i64_array => |arr_len| { - const elem_size: u8 = if (lowering == .i32_array) 32 else 64; - const arr_ty = o.context.intType(elem_size).arrayType(arr_len); - try llvm_params.append(arr_ty); - }, - }; - - return llvm.functionType( - llvm_ret_ty, - llvm_params.items.ptr, - @as(c_uint, @intCast(llvm_params.items.len)), - llvm.Bool.fromBool(fn_info.is_var_args), - ); + return ty; } - /// Use this instead of lowerLlvmType when you want to handle correctly the case of elem_ty - /// being a zero bit type, but it should still be lowered as an i8 in such case. - /// There are other similar cases handled here as well. - fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!*llvm.Type { - const mod = o.module; - const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { - .Opaque => true, - .Fn => !mod.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), - else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), - }; - const llvm_elem_ty = if (lower_elem_ty) - try o.lowerLlvmType(elem_ty) - else - o.context.intType(8); - - return llvm_elem_ty; - } - - fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { + fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!Builder.Type { const mod = o.module; const target = mod.getTarget(); return switch (t.toIntern()) { @@ -3459,8 +3009,11 @@ pub const Object = struct { .bool_type => .i1, .void_type => .void, .type_type => unreachable, - .anyerror_type => .i16, - .comptime_int_type, .comptime_float_type, .noreturn_type => unreachable, + .anyerror_type => Builder.Type.err_int, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + => unreachable, .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), .null_type, .undefined_type, @@ -3482,10 +3035,16 @@ pub const Object = struct { .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, => .ptr, - .slice_const_u8_type, .slice_const_u8_sentinel_0_type => .none, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + => try o.builder.structType(.normal, &.{ .ptr, try o.lowerType(Type.usize) }), .optional_noreturn_type => unreachable, - .anyerror_void_error_union_type => .i16, - .generic_poison_type, .empty_struct_type => unreachable, + .anyerror_void_error_union_type, + .adhoc_inferred_error_set_type, + => Builder.Type.err_int, + .generic_poison_type, + .empty_struct_type, + => unreachable, // values, not types .undef, .zero, @@ -3511,22 +3070,265 @@ pub const Object = struct { else => switch (mod.intern_pool.indexToKey(t.toIntern())) { .int_type => |int_type| try o.builder.intType(int_type.bits), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .One, .Many, .C => try o.builder.pointerType(@enumFromInt( + .One, .Many, .C => try o.builder.ptrType( toLlvmAddressSpace(ptr_type.flags.address_space, target), - )), - .Slice => .none, + ), + .Slice 
=> try o.builder.structType(.normal, &.{ + .ptr, + try o.lowerType(Type.usize), + }), + }, + .array_type => |array_type| o.builder.arrayType( + array_type.len + @intFromBool(array_type.sentinel != .none), + try o.lowerType(array_type.child.toType()), + ), + .vector_type => |vector_type| o.builder.vectorType( + .normal, + vector_type.len, + try o.lowerType(vector_type.child.toType()), + ), + .opt_type => |child_ty| { + if (!child_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) return .i8; + + const payload_ty = try o.lowerType(child_ty.toType()); + if (t.optionalReprIsPayload(mod)) return payload_ty; + + comptime assert(optional_layout_version == 3); + var fields_buf: [3]Builder.Type = .{ payload_ty, .i8, .none }; + const offset = child_ty.toType().abiSize(mod) + 1; + const abi_size = t.abiSize(mod); + const padding = abi_size - offset; + if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); + fields_buf[2] = try o.builder.arrayType(padding, .i8); + return o.builder.structType(.normal, fields_buf[0..3]); }, - .array_type, .vector_type, .opt_type => .none, .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), - .error_union_type => .none, + .error_union_type => |error_union_type| { + const error_type = Builder.Type.err_int; + if (!error_union_type.payload_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + return error_type; + const payload_type = try o.lowerType(error_union_type.payload_type.toType()); + + const payload_align = error_union_type.payload_type.toType().abiAlignment(mod); + const error_align = Type.err_int.abiAlignment(mod); + + const payload_size = error_union_type.payload_type.toType().abiSize(mod); + const error_size = Type.err_int.abiSize(mod); + + var fields_buf: [3]Builder.Type = undefined; + if (error_align > payload_align) { + fields_buf[0] = error_type; + fields_buf[1] = payload_type; + const payload_end = + std.mem.alignForward(u64, error_size, payload_align) + + payload_size; + const abi_size = std.mem.alignForward(u64, payload_end, error_align); + const padding = abi_size - payload_end; + if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); + fields_buf[2] = try o.builder.arrayType(padding, .i8); + return o.builder.structType(.normal, fields_buf[0..3]); + } else { + fields_buf[0] = payload_type; + fields_buf[1] = error_type; + const error_end = + std.mem.alignForward(u64, payload_size, error_align) + + error_size; + const abi_size = std.mem.alignForward(u64, error_end, payload_align); + const padding = abi_size - error_end; + if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); + fields_buf[2] = try o.builder.arrayType(padding, .i8); + return o.builder.structType(.normal, fields_buf[0..3]); + } + }, .simple_type => unreachable, - .struct_type, - .anon_struct_type, - .union_type, - .opaque_type, - => .none, + .struct_type => |struct_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (gop.found_existing) return gop.value_ptr.*; + + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const int_ty = try o.lowerType(struct_obj.backing_int_ty); + gop.value_ptr.* = int_ty; + return int_ty; + } + + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try struct_obj.getFullyQualifiedName(mod), + )); + const ty = try o.builder.opaqueType(name); + gop.value_ptr.* = ty; // must be done before any recursive calls + + assert(struct_obj.haveFieldTypes()); + + var llvm_field_types = 
std.ArrayListUnmanaged(Builder.Type){}; + defer llvm_field_types.deinit(o.gpa); + try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_obj.fields.count()); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 1; + var struct_kind: Builder.Type.Structure.Kind = .normal; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); + if (field_align < field_ty_align) struct_kind = .@"packed"; + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + try llvm_field_types.append(o.gpa, try o.lowerType(field.ty)); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + } + + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(struct_kind, llvm_field_types.items), + ); + return ty; + }, + .anon_struct_type => |anon_struct_type| { + var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .{}; + defer llvm_field_types.deinit(o.gpa); + try llvm_field_types.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + + for (anon_struct_type.types, anon_struct_type.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + try llvm_field_types.append(o.gpa, try o.lowerType(field_ty.toType())); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + } + return o.builder.structType(.normal, llvm_field_types.items); + }, + .union_type => |union_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (gop.found_existing) return gop.value_ptr.*; + + const union_obj = mod.unionPtr(union_type.index); + const layout = union_obj.getLayout(mod, union_type.hasTag()); + + if (union_obj.layout == .Packed) { + const int_ty = try o.builder.intType(@intCast(t.bitSize(mod))); + gop.value_ptr.* = int_ty; + return int_ty; + } + + if (layout.payload_size == 0) { + const enum_tag_ty = try o.lowerType(union_obj.tag_ty); + gop.value_ptr.* = enum_tag_ty; + return enum_tag_ty; + } + + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try union_obj.getFullyQualifiedName(mod), + )); + const ty = try o.builder.opaqueType(name); + gop.value_ptr.* = ty; // must be done before any recursive calls + + const aligned_field = 
union_obj.fields.values()[layout.most_aligned_field]; + const aligned_field_ty = try o.lowerType(aligned_field.ty); + + const payload_ty = ty: { + if (layout.most_aligned_field_size == layout.payload_size) { + break :ty aligned_field_ty; + } + const padding_len = if (layout.tag_size == 0) + layout.abi_size - layout.most_aligned_field_size + else + layout.payload_size - layout.most_aligned_field_size; + break :ty try o.builder.structType(.@"packed", &.{ + aligned_field_ty, + try o.builder.arrayType(padding_len, .i8), + }); + }; + + if (layout.tag_size == 0) { + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(.normal, &.{payload_ty}), + ); + return ty; + } + const enum_tag_ty = try o.lowerType(union_obj.tag_ty); + + // Put the tag before or after the payload depending on which one's + // alignment is greater. + var llvm_fields: [3]Builder.Type = undefined; + var llvm_fields_len: usize = 2; + + if (layout.tag_align >= layout.payload_align) { + llvm_fields = .{ enum_tag_ty, payload_ty, .none }; + } else { + llvm_fields = .{ payload_ty, enum_tag_ty, .none }; + } + + // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. + if (layout.padding != 0) { + llvm_fields[llvm_fields_len] = try o.builder.arrayType(layout.padding, .i8); + llvm_fields_len += 1; + } + + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(.normal, llvm_fields[0..llvm_fields_len]), + ); + return ty; + }, + .opaque_type => |opaque_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (!gop.found_existing) { + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try mod.opaqueFullyQualifiedName(opaque_type), + )); + gop.value_ptr.* = try o.builder.opaqueType(name); + } + return gop.value_ptr.*; + }, .enum_type => |enum_type| try o.lowerType(enum_type.tag_ty.toType()), - .func_type, .error_set_type, .inferred_error_set_type => .none, + .func_type => |func_type| try o.lowerTypeFn(func_type), + .error_set_type, .inferred_error_set_type => Builder.Type.err_int, // values, not types .undef, .runtime_value, @@ -3552,6 +3354,85 @@ pub const Object = struct { }; } + /// Use this instead of lowerType when you want to handle correctly the case of elem_ty + /// being a zero bit type, but it should still be lowered as an i8 in such case. + /// There are other similar cases handled here as well. 
+    fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type {
+        const mod = o.module;
+        const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
+            .Opaque => true,
+            .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
+            .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
+            else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
+        };
+        return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
+    }
+
+    fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
+        const mod = o.module;
+        const ip = &mod.intern_pool;
+        const ret_ty = try lowerFnRetTy(o, fn_info);
+
+        var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
+        defer llvm_params.deinit(o.gpa);
+
+        if (firstParamSRet(fn_info, mod)) {
+            try llvm_params.append(o.gpa, .ptr);
+        }
+
+        if (fn_info.return_type.toType().isError(mod) and
+            mod.comp.bin_file.options.error_return_tracing)
+        {
+            const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
+            try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
+        }
+
+        var it = iterateParamTypes(o, fn_info);
+        while (try it.next()) |lowering| switch (lowering) {
+            .no_bits => continue,
+            .byval => {
+                const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+                try llvm_params.append(o.gpa, try o.lowerType(param_ty));
+            },
+            .byref, .byref_mut => {
+                try llvm_params.append(o.gpa, .ptr);
+            },
+            .abi_sized_int => {
+                const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+                try llvm_params.append(o.gpa, try o.builder.intType(
+                    @intCast(param_ty.abiSize(mod) * 8),
+                ));
+            },
+            .slice => {
+                try llvm_params.appendSlice(o.gpa, &.{ .ptr, try o.lowerType(Type.usize) });
+            },
+            .multiple_llvm_types => {
+                try llvm_params.appendSlice(o.gpa, it.types_buffer[0..it.types_len]);
+            },
+            .as_u16 => {
+                try llvm_params.append(o.gpa, .i16);
+            },
+            .float_array => |count| {
+                const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+                const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
+                try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty));
+            },
+            .i32_array, .i64_array => |arr_len| {
+                try llvm_params.append(o.gpa, try o.builder.arrayType(arr_len, switch (lowering) {
+                    .i32_array => .i32,
+                    .i64_array => .i64,
+                    else => unreachable,
+                }));
+            },
+        };
+
+        return o.builder.fnType(
+            ret_ty,
+            llvm_params.items,
+            if (fn_info.is_var_args) .vararg else .normal,
+        );
+    }
+
     fn lowerValue(o: *Object, arg_tv: TypedValue) Error!*llvm.Value {
         const mod = o.module;
         const gpa = o.gpa;
@@ -3562,8 +3443,7 @@ pub const Object = struct {
             else => {},
         }
         if (tv.val.isUndefDeep(mod)) {
-            const llvm_type = try o.lowerLlvmType(tv.ty);
-            return llvm_type.getUndef();
+            return (try o.lowerType(tv.ty)).toLlvm(&o.builder).getUndef();
         }
 
         switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
@@ -3595,7 +3475,7 @@ pub const Object = struct {
                 .generic_poison,
                 => unreachable, // non-runtime values
                 .false, .true => {
-                    const llvm_type = try o.lowerLlvmType(tv.ty);
+                    const llvm_type = (try o.lowerType(tv.ty)).toLlvm(&o.builder);
                     return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
                 },
             },
@@ -3623,7 +3503,7 @@ pub const Object = struct {
                 return lowerBigInt(o, tv.ty, bigint);
             },
             .err => |err| {
-                const llvm_ty = try o.lowerLlvmType(Type.anyerror);
+                const llvm_ty = Builder.Type.err_int.toLlvm(&o.builder);
                 const int = try mod.getErrorValue(err.name);
                 return llvm_ty.constInt(int, .False);
             },
@@ -3659,7 +3539,7 @@ pub const Object = struct {
                 });
 
                 var fields_buf: 
[3]*llvm.Value = undefined; - const llvm_ty = try o.lowerLlvmType(tv.ty); + const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); const llvm_field_count = llvm_ty.countStructElementTypes(); if (llvm_field_count > 2) { assert(llvm_field_count == 3); @@ -3703,7 +3583,7 @@ pub const Object = struct { return unsigned_val; }, .float => { - const llvm_ty = try o.lowerLlvmType(tv.ty); + const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); switch (tv.ty.floatBits(target)) { 16 => { const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); @@ -3788,7 +3668,7 @@ pub const Object = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } - const llvm_ty = try o.lowerLlvmType(tv.ty); + const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { .none => llvm_ty.constNull(), else => |payload| o.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), @@ -3834,7 +3714,7 @@ pub const Object = struct { .True, ); } else { - const llvm_elem_ty = try o.lowerLlvmType(elem_ty); + const llvm_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); return llvm_elem_ty.constArray( llvm_elems.ptr, @as(c_uint, @intCast(llvm_elems.len)), @@ -3869,7 +3749,7 @@ pub const Object = struct { .True, ); } else { - const llvm_elem_ty = try o.lowerLlvmType(elem_ty); + const llvm_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); return llvm_elem_ty.constArray( llvm_elems.ptr, @as(c_uint, @intCast(llvm_elems.len)), @@ -3956,7 +3836,7 @@ pub const Object = struct { .False, ); } else { - const llvm_struct_ty = try o.lowerLlvmType(tv.ty); + const llvm_struct_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); return llvm_struct_ty.constNamedStruct( llvm_fields.items.ptr, @as(c_uint, @intCast(llvm_fields.items.len)), @@ -3965,7 +3845,7 @@ pub const Object = struct { }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const llvm_struct_ty = try o.lowerLlvmType(tv.ty); + const llvm_struct_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -4062,7 +3942,7 @@ pub const Object = struct { else => unreachable, }, .un => { - const llvm_union_ty = try o.lowerLlvmType(tv.ty); + const llvm_union_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { .none => tv.val.castTag(.@"union").?.data, else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { @@ -4232,7 +4112,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(payload_offset, .False), }; - const eu_llvm_ty = try o.lowerLlvmType(eu_ty); + const eu_llvm_ty = (try o.lowerType(eu_ty)).toLlvm(&o.builder); return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .opt_payload => |opt_ptr| { @@ -4253,19 +4133,19 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(0, .False), }; - const opt_llvm_ty = try o.lowerLlvmType(opt_ty); + const opt_llvm_ty = (try o.lowerType(opt_ty)).toLlvm(&o.builder); return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .comptime_field => unreachable, .elem => |elem_ptr| { const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; const 
elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .field => |field_ptr| { @@ -4294,7 +4174,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(llvm_pl_index, .False), }; - const parent_llvm_ty = try o.lowerLlvmType(parent_ty); + const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { @@ -4317,7 +4197,7 @@ pub const Object = struct { return field_addr.constIntToPtr(final_llvm_ty); } - const parent_llvm_ty = try o.lowerLlvmType(parent_ty); + const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), @@ -4336,7 +4216,7 @@ pub const Object = struct { llvm_u32.constInt(0, .False), llvm_u32.constInt(field_index, .False), }; - const parent_llvm_ty = try o.lowerLlvmType(parent_ty); + const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, else => unreachable, @@ -4386,11 +4266,11 @@ pub const Object = struct { const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: { - const llvm_decl_wanted_ptr_ty = o.context.pointerType(llvm_wanted_addrspace); + const llvm_decl_wanted_ptr_ty = o.context.pointerType(@intFromEnum(llvm_wanted_addrspace)); break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty); } else llvm_decl_val; - const llvm_type = try o.lowerLlvmType(tv.ty); + const llvm_type = (try o.lowerType(tv.ty)).toLlvm(&o.builder); if (tv.ty.zigTypeTag(mod) == .Int) { return llvm_val.constPtrToInt(llvm_type); } else { @@ -4405,8 +4285,8 @@ pub const Object = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = try o.lowerLlvmType(Type.usize); - const llvm_ptr_ty = try o.lowerLlvmType(ptr_ty); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_ptr_ty = (try o.lowerType(ptr_ty)).toLlvm(&o.builder); if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| { return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); } @@ -4492,23 +4372,23 @@ pub const Object = struct { /// widen it before using it and then truncate the result. /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. 
-    fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
+    fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type {
         const mod = o.module;
         const int_ty = switch (ty.zigTypeTag(mod)) {
             .Int => ty,
             .Enum => ty.intTagType(mod),
             .Float => {
-                if (!is_rmw_xchg) return null;
-                return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8)));
+                if (!is_rmw_xchg) return .none;
+                return o.builder.intType(@intCast(ty.abiSize(mod) * 8));
             },
-            .Bool => return o.context.intType(8),
-            else => return null,
+            .Bool => return .i8,
+            else => return .none,
         };
         const bit_count = int_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8)));
+            return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8));
         } else {
-            return null;
+            return .none;
         }
     }
@@ -4549,13 +4429,13 @@ pub const Object = struct {
         llvm_arg_i: u32,
         alignment: u32,
         byval_attr: bool,
-        param_llvm_ty: *llvm.Type,
+        param_llvm_ty: Builder.Type,
     ) void {
         o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
         o.addArgAttr(llvm_fn, llvm_arg_i, "readonly");
         o.addArgAttrInt(llvm_fn, llvm_arg_i, "align", alignment);
         if (byval_attr) {
-            llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty);
+            llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder));
         }
     }
 };
@@ -4626,7 +4506,7 @@ pub const DeclGen = struct {
             const new_global = o.llvm_module.addGlobalInAddressSpace(
                 llvm_init.typeOf(),
                 "",
-                llvm_global_addrspace,
+                @intFromEnum(llvm_global_addrspace),
             );
             new_global.setLinkage(llvm_global.getLinkage());
             new_global.setUnnamedAddr(llvm_global.getUnnamedAddress());
@@ -4761,14 +4641,14 @@ pub const FuncGen = struct {
         const target = mod.getTarget();
         const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
         const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
-        const global = o.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
+        const global = o.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", @intFromEnum(llvm_actual_addrspace));
         global.setInitializer(llvm_val);
         global.setLinkage(.Private);
         global.setGlobalConstant(.True);
         global.setUnnamedAddr(.True);
         global.setAlignment(tv.ty.abiAlignment(mod));
         const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
-            global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
+            global.constAddrSpaceCast(self.context.pointerType(@intFromEnum(llvm_wanted_addrspace)))
         else
             global;
         return addrspace_casted_ptr;
@@ -5053,7 +4933,7 @@ pub const FuncGen = struct {
         defer llvm_args.deinit();
 
         const ret_ptr = if (!sret) null else blk: {
-            const llvm_ret_ty = try o.lowerLlvmType(return_type);
+            const llvm_ret_ty = (try o.lowerType(return_type)).toLlvm(&o.builder);
             const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
@@ -5066,13 +4946,13 @@ pub const FuncGen = struct {
         }
 
         var it = iterateParamTypes(o, fn_info);
-        while (it.nextCall(self, args)) |lowering| switch (lowering) {
+        while (try it.nextCall(self, args)) |lowering| switch (lowering) {
            .no_bits => continue,
            .byval => {
                const arg = args[it.zig_index - 1];
                const param_ty = self.typeOf(arg);
                const llvm_arg = try self.resolveInst(arg);
-                const llvm_param_ty = try o.lowerLlvmType(param_ty);
+                const llvm_param_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder);
                if (isByRef(param_ty, mod)) {
                    const alignment = param_ty.abiAlignment(mod);
                    const 
load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, ""); @@ -5103,7 +4983,7 @@ pub const FuncGen = struct { const llvm_arg = try self.resolveInst(arg); const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); if (isByRef(param_ty, mod)) { const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); @@ -5157,7 +5037,7 @@ pub const FuncGen = struct { .multiple_llvm_types => { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); - const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len]; + const llvm_types = it.llvm_types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); const arg_ptr = if (is_by_ref) llvm_arg else p: { @@ -5168,7 +5048,7 @@ pub const FuncGen = struct { }; const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False); - try llvm_args.ensureUnusedCapacity(it.llvm_types_len); + try llvm_args.ensureUnusedCapacity(it.types_len); for (llvm_types, 0..) |field_ty, i_usize| { const i = @as(c_uint, @intCast(i_usize)); const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); @@ -5194,7 +5074,7 @@ pub const FuncGen = struct { llvm_arg = store_inst; } - const float_ty = try o.lowerLlvmType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); + const float_ty = (try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?)).toLlvm(&o.builder); const array_llvm_ty = float_ty.arrayType(count); const alignment = arg_ty.abiAlignment(mod); @@ -5223,7 +5103,7 @@ pub const FuncGen = struct { }; const call = self.builder.buildCall( - try o.lowerLlvmType(zig_fn_ty), + (try o.lowerType(zig_fn_ty)).toLlvm(&o.builder), llvm_fn, llvm_args.items.ptr, @as(c_uint, @intCast(llvm_args.items.len)), @@ -5237,7 +5117,7 @@ pub const FuncGen = struct { it = iterateParamTypes(o, fn_info); it.llvm_index += @intFromBool(sret); it.llvm_index += @intFromBool(err_return_tracing); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); @@ -5248,7 +5128,7 @@ pub const FuncGen = struct { .byref => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); - const param_llvm_ty = try o.lowerLlvmType(param_ty); + const param_llvm_ty = try o.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, @@ -5297,7 +5177,7 @@ pub const FuncGen = struct { return null; } - const llvm_ret_ty = try o.lowerLlvmType(return_type); + const llvm_ret_ty = (try o.lowerType(return_type)).toLlvm(&o.builder); if (ret_ptr) |rp| { call.setCallSret(llvm_ret_ty); @@ -5311,7 +5191,7 @@ pub const FuncGen = struct { } } - const abi_ret_ty = try lowerFnRetTy(o, fn_info); + const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); if (abi_ret_ty != llvm_ret_ty) { // In this case the function return type is honoring the calling convention by having @@ -5374,7 +5254,7 @@ pub const FuncGen = struct { const fn_info = mod.typeToFunc(panic_decl.ty).?; const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); _ = fg.builder.buildCall( - try o.lowerLlvmType(panic_decl.ty), + (try 
o.lowerType(panic_decl.ty)).toLlvm(&o.builder), panic_global.toLlvm(&o.builder), &args, args.len, @@ -5403,7 +5283,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try o.lowerLlvmType(Type.anyerror); + const err_int = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -5411,7 +5291,7 @@ pub const FuncGen = struct { return null; } - const abi_ret_ty = try lowerFnRetTy(o, fn_info); + const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); const operand = try self.resolveInst(un_op); const alignment = ret_ty.abiAlignment(mod); @@ -5451,7 +5331,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try o.lowerLlvmType(Type.anyerror); + const err_int = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -5463,7 +5343,7 @@ pub const FuncGen = struct { return null; } const ptr = try self.resolveInst(un_op); - const abi_ret_ty = try lowerFnRetTy(o, fn_info); + const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); const loaded = self.builder.buildLoad(abi_ret_ty, ptr, ""); loaded.setAlignment(ret_ty.abiAlignment(mod)); _ = self.builder.buildRet(loaded); @@ -5475,7 +5355,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = self.air.getRefType(ty_op.ty); - const llvm_arg_ty = try o.lowerLlvmType(arg_ty); + const llvm_arg_ty = (try o.lowerType(arg_ty)).toLlvm(&o.builder); return self.builder.buildVAArg(list, llvm_arg_ty, ""); } @@ -5485,7 +5365,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = self.air.getRefType(ty_op.ty); - const llvm_va_list_ty = try o.lowerLlvmType(va_list_ty); + const llvm_va_list_ty = (try o.lowerType(va_list_ty)).toLlvm(&o.builder); const mod = o.module; const result_alignment = va_list_ty.abiAlignment(mod); @@ -5533,7 +5413,7 @@ pub const FuncGen = struct { const o = self.dg.object; const mod = o.module; const va_list_ty = self.typeOfIndex(inst); - const llvm_va_list_ty = try o.lowerLlvmType(va_list_ty); + const llvm_va_list_ty = (try o.lowerType(va_list_ty)).toLlvm(&o.builder); const result_alignment = va_list_ty.abiAlignment(mod); const list = self.buildAlloca(llvm_va_list_ty, result_alignment); @@ -5612,7 +5492,7 @@ pub const FuncGen = struct { // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. 
const is_by_ref = isByRef(scalar_ty, mod); - const opt_llvm_ty = try o.lowerLlvmType(scalar_ty); + const opt_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); const llvm_i2 = self.context.intType(2); @@ -5722,7 +5602,7 @@ pub const FuncGen = struct { const is_body = inst_ty.zigTypeTag(mod) == .Fn; if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - const raw_llvm_ty = try o.lowerLlvmType(inst_ty); + const raw_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); const llvm_ty = ty: { // If the zig tag type is a function, this represents an actual function body; not @@ -5827,11 +5707,11 @@ pub const FuncGen = struct { const mod = o.module; const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); + const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { - const err_set_ty = try o.lowerLlvmType(Type.anyerror); + const err_set_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); const zero = err_set_ty.constNull(); if (!payload_has_bits) { // TODO add alignment to this load @@ -5966,9 +5846,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); - const slice_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); + const slice_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); @@ -5977,7 +5857,7 @@ pub const FuncGen = struct { const indices: [2]*llvm.Value = .{ llvm_usize.constNull(), llvm_usize.constNull(), }; - const array_llvm_ty = try o.lowerLlvmType(array_ty); + const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, ""); const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); @@ -5994,7 +5874,7 @@ pub const FuncGen = struct { const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); const target = mod.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { @@ -6055,7 +5935,7 @@ pub const FuncGen = struct { const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag @@ -6087,7 +5967,7 @@ pub const FuncGen = struct { compiler_rt_dest_abbrev, }) catch unreachable; - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const operand_llvm_ty = (try 
o.lowerType(operand_ty)).toLlvm(&o.builder); const param_types = [1]*llvm.Type{operand_llvm_ty}; const libc_fn = try self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); const params = [1]*llvm.Value{operand}; @@ -6145,7 +6025,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); - const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod)); + const slice_llvm_ty = (try o.lowerPtrElemTy(slice_ptr_ty.childType(mod))).toLlvm(&o.builder); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); } @@ -6159,7 +6039,7 @@ pub const FuncGen = struct { const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(mod); - const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); + const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -6182,7 +6062,7 @@ pub const FuncGen = struct { const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod)); + const llvm_elem_ty = (try o.lowerPtrElemTy(slice_ty.childType(mod))).toLlvm(&o.builder); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -6197,7 +6077,7 @@ pub const FuncGen = struct { const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const array_llvm_ty = try o.lowerLlvmType(array_ty); + const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; @@ -6208,7 +6088,7 @@ pub const FuncGen = struct { return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false); } else { - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); if (Air.refToIndex(bin_op.lhs)) |lhs_index| { if (self.air.instructions.items(.tag)[lhs_index] == .load) { const load_data = self.air.instructions.items(.data)[lhs_index]; @@ -6242,7 +6122,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); + const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch @@ -6279,7 +6159,7 @@ pub const FuncGen = struct { const elem_ptr = self.air.getRefType(ty_pl.ty); if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr; - const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); + const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); if (ptr_ty.isSinglePointer(mod)) { // If this is a single-item pointer to an array, we need another index in the GEP. 
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; @@ -6333,7 +6213,7 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try o.lowerLlvmType(field_ty); + const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); @@ -6355,7 +6235,7 @@ pub const FuncGen = struct { .Union => { assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; - const elem_llvm_ty = try o.lowerLlvmType(field_ty); + const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); const same_size_int = self.context.intType(elem_bits); @@ -6377,7 +6257,7 @@ pub const FuncGen = struct { .Struct => { assert(struct_ty.containerLayout(mod) != .Packed); const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = try o.lowerLlvmType(struct_ty); + const struct_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), @@ -6396,11 +6276,11 @@ pub const FuncGen = struct { } }, .Union => { - const union_llvm_ty = try o.lowerLlvmType(struct_ty); + const union_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); const layout = struct_ty.unionGetLayout(mod); const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); - const llvm_field_ty = try o.lowerLlvmType(field_ty); + const llvm_field_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6426,7 +6306,7 @@ pub const FuncGen = struct { const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); - const res_ty = try o.lowerLlvmType(self.air.getRefType(ty_pl.ty)); + const res_ty = (try o.lowerType(self.air.getRefType(ty_pl.ty))).toLlvm(&o.builder); if (field_offset == 0) { return field_ptr; } @@ -6691,7 +6571,7 @@ pub const FuncGen = struct { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); - const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(mod)); + const elem_llvm_ty = (try o.lowerPtrElemTy(output_ty.childType(mod))).toLlvm(&o.builder); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. 
"=*m") @@ -6708,7 +6588,7 @@ pub const FuncGen = struct { } } else { const ret_ty = self.typeOfIndex(inst); - llvm_ret_types[llvm_ret_i] = try o.lowerLlvmType(ret_ty); + llvm_ret_types[llvm_ret_i] = (try o.lowerType(ret_ty)).toLlvm(&o.builder); llvm_ret_i += 1; } @@ -6745,13 +6625,13 @@ pub const FuncGen = struct { const arg_ty = self.typeOf(input); var llvm_elem_ty: ?*llvm.Type = null; if (isByRef(arg_ty, mod)) { - llvm_elem_ty = try o.lowerPtrElemTy(arg_ty); + llvm_elem_ty = (try o.lowerPtrElemTy(arg_ty)).toLlvm(&o.builder); if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); } else { const alignment = arg_ty.abiAlignment(mod); - const arg_llvm_ty = try o.lowerLlvmType(arg_ty); + const arg_llvm_ty = (try o.lowerType(arg_ty)).toLlvm(&o.builder); const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, ""); load_inst.setAlignment(alignment); llvm_param_values[llvm_param_i] = load_inst; @@ -6792,7 +6672,7 @@ pub const FuncGen = struct { // an elementtype() attribute. if (constraint[0] == '*') { llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - try o.lowerPtrElemTy(arg_ty.childType(mod)); + (try o.lowerPtrElemTy(arg_ty.childType(mod))).toLlvm(&o.builder); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6989,7 +6869,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const optional_llvm_ty = try o.lowerLlvmType(optional_ty); + const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) @@ -6998,7 +6878,7 @@ pub const FuncGen = struct { operand; if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - const ptr_ty = try o.lowerLlvmType(payload_ty.slicePtrFieldType(mod)); + const ptr_ty = (try o.lowerType(payload_ty.slicePtrFieldType(mod))).toLlvm(&o.builder); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -7037,7 +6917,7 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(mod); - const err_set_ty = try o.lowerLlvmType(Type.anyerror); + const err_set_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); const zero = err_set_ty.constNull(); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { @@ -7051,7 +6931,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(try o.lowerLlvmType(err_union_ty), operand, "") + self.builder.buildLoad((try o.lowerType(err_union_ty)).toLlvm(&o.builder), operand, "") else operand; return self.builder.buildICmp(op, loaded, zero, ""); @@ -7060,7 +6940,7 @@ pub const FuncGen = struct { const err_field_index = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); + const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); const loaded = 
self.builder.buildLoad(err_set_ty, err_field_ptr, ""); return self.builder.buildICmp(op, loaded, zero, ""); @@ -7086,7 +6966,7 @@ pub const FuncGen = struct { // The payload and the optional are the same value. return operand; } - const optional_llvm_ty = try o.lowerLlvmType(optional_ty); + const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); } @@ -7112,7 +6992,7 @@ pub const FuncGen = struct { } // First set the non-null bit. - const optional_llvm_ty = try o.lowerLlvmType(optional_ty); + const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); const non_null_ptr = self.builder.buildStructGEP(optional_llvm_ty, operand, 1, ""); // TODO set alignment on this store _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -7139,7 +7019,7 @@ pub const FuncGen = struct { return operand; } - const opt_llvm_ty = try o.lowerLlvmType(optional_ty); + const opt_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -7163,7 +7043,7 @@ pub const FuncGen = struct { return if (operand_is_ptr) operand else null; } const offset = errUnionPayloadOffset(payload_ty, mod); - const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); + const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); if (operand_is_ptr) { return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); } else if (isByRef(err_union_ty, mod)) { @@ -7193,7 +7073,7 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const err_llvm_ty = try o.lowerLlvmType(Type.anyerror); + const err_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); if (operand_is_ptr) { return operand; } else { @@ -7201,7 +7081,7 @@ pub const FuncGen = struct { } } - const err_set_llvm_ty = try o.lowerLlvmType(Type.anyerror); + const err_set_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -7212,7 +7092,7 @@ pub const FuncGen = struct { const offset = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); + const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); } @@ -7233,7 +7113,7 @@ pub const FuncGen = struct { _ = self.builder.buildStore(non_error_val, operand); return operand; } - const err_union_llvm_ty = try o.lowerLlvmType(err_union_ty); + const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); { const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. 
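Most of the churn in the hunks above and below is one mechanical substitution: `lowerLlvmType`, which handed back an `*llvm.Type` directly, becomes `lowerType`, which returns a `Builder.Type` handle that is only converted to an `*llvm.Type` at the points where the LLVM C API still needs one. Roughly, and using a hypothetical helper name that is not part of the patch:

    // Sketch of the new two-step lowering; `lowerToLlvm` is illustrative only.
    fn lowerToLlvm(o: *Object, ty: Type) Allocator.Error!*llvm.Type {
        // lowerType interns the type in the self-hosted Builder, so it may allocate.
        const builder_ty: Builder.Type = try o.lowerType(ty);
        // toLlvm resolves the handle to the *llvm.Type expected by the C API.
        return builder_ty.toLlvm(&o.builder);
    }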
@@ -7269,7 +7149,7 @@ pub const FuncGen = struct { const mod = o.module; const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = try o.lowerLlvmType(struct_ty); + const struct_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), @@ -7293,7 +7173,7 @@ pub const FuncGen = struct { if (optional_ty.optionalReprIsPayload(mod)) { return operand; } - const llvm_optional_ty = try o.lowerLlvmType(optional_ty); + const llvm_optional_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); if (isByRef(optional_ty, mod)) { const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); @@ -7317,8 +7197,8 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const ok_err_code = (try o.lowerLlvmType(Type.anyerror)).constNull(); - const err_un_llvm_ty = try o.lowerLlvmType(err_un_ty); + const ok_err_code = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder).constNull(); + const err_un_llvm_ty = (try o.lowerType(err_un_ty)).toLlvm(&o.builder); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); @@ -7347,7 +7227,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const err_un_llvm_ty = try o.lowerLlvmType(err_un_ty); + const err_un_llvm_ty = (try o.lowerType(err_un_ty)).toLlvm(&o.builder); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); @@ -7403,7 +7283,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try o.lowerLlvmType(vector_ptr_ty.childType(mod)); + const elem_llvm_ty = (try o.lowerType(vector_ptr_ty.childType(mod))).toLlvm(&o.builder); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); @@ -7447,7 +7327,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const llvm_slice_ty = try o.lowerLlvmType(inst_ty); + const llvm_slice_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` // but `ptr` is pointing to the global directly. 
@@ -7491,7 +7371,7 @@ pub const FuncGen = struct { true => signed_intrinsic, false => unsigned_intrinsic, }; - const llvm_inst_ty = try o.lowerLlvmType(inst_ty); + const llvm_inst_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); const result_struct = fg.builder.buildCall( llvm_fn.globalGetValueType(), @@ -7664,11 +7544,11 @@ pub const FuncGen = struct { return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } if (scalar_ty.isSignedInt(mod)) { - const inst_llvm_ty = try o.lowerLlvmType(inst_ty); + const inst_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); @@ -7730,7 +7610,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const inst_llvm_ty = try o.lowerLlvmType(inst_ty); + const inst_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { @@ -7745,7 +7625,7 @@ pub const FuncGen = struct { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); @@ -7774,7 +7654,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); + const llvm_elem_ty = (try o.lowerPtrElemTy(ptr_ty.childType(mod))).toLlvm(&o.builder); switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. @@ -7802,7 +7682,7 @@ pub const FuncGen = struct { const offset = try self.resolveInst(bin_op.rhs); const negative_offset = self.builder.buildNeg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); + const llvm_elem_ty = (try o.lowerPtrElemTy(ptr_ty.childType(mod))).toLlvm(&o.builder); switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
@@ -7843,8 +7723,8 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = try o.lowerLlvmType(lhs_ty); - const llvm_dest_ty = try o.lowerLlvmType(dest_ty); + const llvm_lhs_ty = (try o.lowerType(lhs_ty)).toLlvm(&o.builder); + const llvm_dest_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); @@ -7920,7 +7800,7 @@ pub const FuncGen = struct { const f = o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type); var global = Builder.Global{ - .type = .void, + .type = try o.builder.fnType(.void, &.{}, .normal), .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; var function = Builder.Function{ @@ -7947,7 +7827,7 @@ pub const FuncGen = struct { const mod = o.module; const target = o.module.getTarget(); const scalar_ty = ty.scalarType(mod); - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); if (intrinsicsAllowed(scalar_ty, target)) { const llvm_predicate: llvm.RealPredicate = switch (pred) { @@ -8050,8 +7930,8 @@ pub const FuncGen = struct { const mod = o.module; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); - const llvm_ty = try o.lowerLlvmType(ty); - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const llvm_ty = (try o.lowerType(ty)).toLlvm(&o.builder); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); var fn_name_buf: [64]u8 = undefined; @@ -8161,10 +8041,10 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.typeOfIndex(inst); - const llvm_dest_ty = try o.lowerLlvmType(dest_ty); + const llvm_dest_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") + self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") else rhs; @@ -8235,7 +8115,7 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_ty.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") + self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") else rhs; if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); @@ -8256,7 +8136,7 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_type.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_type), "") + self.builder.buildZExt(rhs, (try o.lowerType(lhs_type)).toLlvm(&o.builder), "") else rhs; return self.builder.buildShl(lhs, casted_rhs, ""); @@ -8291,7 +8171,7 @@ pub const FuncGen = struct { // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates. 
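The comment above is why this path emits the clamp manually: Zig's saturating shift-left operator `<<|` is defined to clamp to the type's maximum rather than produce an overflowed or poisoned value. A one-line reminder of the source-level semantics being lowered here (illustrative values):

    const x: u8 = 200;
    const y = x <<| 3; // 1600 does not fit in u8, so y saturates to 255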
- const lhs_scalar_llvm_ty = try o.lowerLlvmType(lhs_scalar_ty); + const lhs_scalar_llvm_ty = (try o.lowerType(lhs_scalar_ty)).toLlvm(&o.builder); const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); if (rhs_ty.zigTypeTag(mod) == .Vector) { @@ -8320,7 +8200,7 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_ty.scalarType(mod); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerLlvmType(lhs_ty), "") + self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") else rhs; const is_signed_int = lhs_scalar_ty.isSignedInt(mod); @@ -8346,7 +8226,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(mod); @@ -8367,7 +8247,7 @@ pub const FuncGen = struct { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); + const dest_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } @@ -8383,11 +8263,11 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); } else { - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{ @@ -8414,11 +8294,11 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } else { - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); - const dest_llvm_ty = try o.lowerLlvmType(dest_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); + const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{ @@ -8439,7 +8319,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty); - const dest_llvm_ty = try o.lowerLlvmType(self.typeOfIndex(inst)); + const dest_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, ""); } @@ -8456,7 +8336,7 @@ pub const FuncGen = struct { const mod = o.module; const operand_is_ref = 
isByRef(operand_ty, mod); const result_is_ref = isByRef(inst_ty, mod); - const llvm_dest_ty = try o.lowerLlvmType(inst_ty); + const llvm_dest_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); if (operand_is_ref and result_is_ref) { // They are both pointers, so just return the same opaque pointer :) @@ -8486,7 +8366,7 @@ pub const FuncGen = struct { } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); @@ -8503,7 +8383,7 @@ pub const FuncGen = struct { return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(mod); - const llvm_vector_ty = try o.lowerLlvmType(inst_ty); + const llvm_vector_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } @@ -8518,9 +8398,9 @@ pub const FuncGen = struct { } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. - const array_llvm_ty = try o.lowerLlvmType(operand_ty); - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const array_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); @@ -8629,7 +8509,7 @@ pub const FuncGen = struct { if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); - const pointee_llvm_ty = try o.lowerLlvmType(pointee_type); + const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); const alignment = ptr_ty.ptrAlignment(mod); return self.buildAlloca(pointee_llvm_ty, alignment); } @@ -8641,7 +8521,7 @@ pub const FuncGen = struct { const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; - const ret_llvm_ty = try o.lowerLlvmType(ret_ty); + const ret_llvm_ty = (try o.lowerType(ret_ty)).toLlvm(&o.builder); return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); } @@ -8673,7 +8553,7 @@ pub const FuncGen = struct { else u8_llvm_ty.getUndef(); const operand_size = operand_ty.abiSize(mod); - const usize_llvm_ty = try o.lowerLlvmType(Type.usize); + const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const len = usize_llvm_ty.constInt(operand_size, .False); const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); @@ -8746,7 +8626,7 @@ pub const FuncGen = struct { _ = inst; const o = self.dg.object; const mod = o.module; - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 @@ -8774,7 +8654,7 @@ pub const FuncGen 
= struct { const params = [_]*llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -8795,15 +8675,16 @@ pub const FuncGen = struct { var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); const operand_ty = self.typeOf(extra.ptr).childType(mod); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, false); - if (opt_abi_ty) |abi_ty| { + const abi_ty = try o.getAtomicAbiType(operand_ty, false); + if (abi_ty != .none) { + const llvm_abi_ty = abi_ty.toLlvm(&o.builder); // operand needs widening and truncating if (operand_ty.isSignedInt(mod)) { - expected_value = self.builder.buildSExt(expected_value, abi_ty, ""); - new_value = self.builder.buildSExt(new_value, abi_ty, ""); + expected_value = self.builder.buildSExt(expected_value, llvm_abi_ty, ""); + new_value = self.builder.buildSExt(new_value, llvm_abi_ty, ""); } else { - expected_value = self.builder.buildZExt(expected_value, abi_ty, ""); - new_value = self.builder.buildZExt(new_value, abi_ty, ""); + expected_value = self.builder.buildZExt(expected_value, llvm_abi_ty, ""); + new_value = self.builder.buildZExt(new_value, llvm_abi_ty, ""); } } const result = self.builder.buildAtomicCmpXchg( @@ -8819,8 +8700,8 @@ pub const FuncGen = struct { const optional_ty = self.typeOfIndex(inst); var payload = self.builder.buildExtractValue(result, 0, ""); - if (opt_abi_ty != null) { - payload = self.builder.buildTrunc(payload, try o.lowerLlvmType(operand_ty), ""); + if (abi_ty != .none) { + payload = self.builder.buildTrunc(payload, (try o.lowerType(operand_ty)).toLlvm(&o.builder), ""); } const success_bit = self.builder.buildExtractValue(result, 1, ""); @@ -8848,15 +8729,16 @@ pub const FuncGen = struct { const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); const single_threaded = llvm.Bool.fromBool(self.single_threaded); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, op == .Xchg); - if (opt_abi_ty) |abi_ty| { + const abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg); + if (abi_ty != .none) { + const llvm_abi_ty = abi_ty.toLlvm(&o.builder); // operand needs widening and truncating or bitcasting. const casted_operand = if (is_float) - self.builder.buildBitCast(operand, abi_ty, "") + self.builder.buildBitCast(operand, llvm_abi_ty, "") else if (is_signed_int) - self.builder.buildSExt(operand, abi_ty, "") + self.builder.buildSExt(operand, llvm_abi_ty, "") else - self.builder.buildZExt(operand, abi_ty, ""); + self.builder.buildZExt(operand, llvm_abi_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, @@ -8865,7 +8747,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); if (is_float) { return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); } else { @@ -8878,7 +8760,7 @@ pub const FuncGen = struct { } // It's a pointer but we need to treat it as an int. 
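`getAtomicAbiType` now signals "no widening needed" with the `Builder.Type.none` sentinel instead of an optional `*llvm.Type`; call sites compare against `.none` and convert the handle once before emitting the widening casts. Condensed from the cmpxchg hunk above (not a new hunk):

    const abi_ty = try o.getAtomicAbiType(operand_ty, false); // .none => already ABI sized
    if (abi_ty != .none) {
        const llvm_abi_ty = abi_ty.toLlvm(&o.builder); // convert once, reuse for every cast
        expected_value = self.builder.buildZExt(expected_value, llvm_abi_ty, "");
        new_value = self.builder.buildZExt(new_value, llvm_abi_ty, "");
    }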
- const usize_llvm_ty = try o.lowerLlvmType(Type.usize); + const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( op, @@ -8887,7 +8769,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); } @@ -8902,15 +8784,16 @@ pub const FuncGen = struct { if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); - const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false); + const abi_ty = try o.getAtomicAbiType(elem_ty, false); const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse ptr_info.child.toType().abiAlignment(mod))); const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - if (opt_abi_llvm_ty) |abi_llvm_ty| { + if (abi_ty != .none) { + const llvm_abi_ty = abi_ty.toLlvm(&o.builder); // operand needs widening and truncating - const load_inst = self.builder.buildLoad(abi_llvm_ty, ptr, ""); + const load_inst = self.builder.buildLoad(llvm_abi_ty, ptr, ""); load_inst.setAlignment(ptr_alignment); load_inst.setVolatile(ptr_volatile); load_inst.setOrdering(ordering); @@ -8936,14 +8819,15 @@ pub const FuncGen = struct { if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, false); + const abi_ty = try o.getAtomicAbiType(operand_ty, false); - if (opt_abi_ty) |abi_ty| { + if (abi_ty != .none) { + const llvm_abi_ty = abi_ty.toLlvm(&o.builder); // operand needs widening if (operand_ty.isSignedInt(mod)) { - element = self.builder.buildSExt(element, abi_ty, ""); + element = self.builder.buildSExt(element, llvm_abi_ty, ""); } else { - element = self.builder.buildZExt(element, abi_ty, ""); + element = self.builder.buildZExt(element, llvm_abi_ty, ""); } } try self.store(ptr, ptr_ty, element, ordering); @@ -9056,7 +8940,7 @@ pub const FuncGen = struct { .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), .Many, .C => unreachable, }; - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const len_gep = [_]*llvm.Value{len}; const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); _ = self.builder.buildBr(loop_block); @@ -9181,7 +9065,7 @@ pub const FuncGen = struct { _ = self.builder.buildStore(new_tag, union_ptr); return null; } - const un_llvm_ty = try o.lowerLlvmType(un_ty); + const un_llvm_ty = (try o.lowerType(un_ty)).toLlvm(&o.builder); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); const tag_field_ptr = self.builder.buildStructGEP(un_llvm_ty, union_ptr, tag_index, ""); // TODO alignment on this store @@ -9198,7 +9082,7 @@ pub const FuncGen = struct { if (layout.tag_size == 0) return null; const union_handle = try self.resolveInst(ty_op.operand); if (isByRef(un_ty, mod)) { - const llvm_un_ty = try o.lowerLlvmType(un_ty); + const llvm_un_ty = (try o.lowerType(un_ty)).toLlvm(&o.builder); if 
(layout.payload_size == 0) { return self.builder.buildLoad(llvm_un_ty, union_handle, ""); } @@ -9240,13 +9124,13 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerLlvmType(result_ty); + const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); const bits = operand_ty.intInfo(mod).bits; const result_bits = result_ty.intInfo(mod).bits; @@ -9267,12 +9151,12 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const params = [_]*llvm.Value{operand}; - const operand_llvm_ty = try o.lowerLlvmType(operand_ty); + const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerLlvmType(result_ty); + const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); const bits = operand_ty.intInfo(mod).bits; const result_bits = result_ty.intInfo(mod).bits; @@ -9294,7 +9178,7 @@ pub const FuncGen = struct { assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try o.lowerLlvmType(operand_ty); + var operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte @@ -9328,7 +9212,7 @@ pub const FuncGen = struct { const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerLlvmType(result_ty); + const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); @@ -9407,9 +9291,9 @@ pub const FuncGen = struct { const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}); - const param_types = [_]*llvm.Type{try o.lowerLlvmType(enum_type.tag_ty.toType())}; + const param_types = [_]*llvm.Type{(try o.lowerType(enum_type.tag_ty.toType())).toLlvm(&o.builder)}; - const llvm_ret_ty = try o.lowerLlvmType(Type.bool); + const llvm_ret_ty = (try o.lowerType(Type.bool)).toLlvm(&o.builder); const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); fn_val.setLinkage(.Internal); @@ -9477,11 +9361,11 @@ pub const FuncGen = struct { const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); const slice_ty = Type.slice_const_u8_sentinel_0; - const llvm_ret_ty = try o.lowerLlvmType(slice_ty); - const usize_llvm_ty = try o.lowerLlvmType(Type.usize); + const llvm_ret_ty = (try 
o.lowerType(slice_ty)).toLlvm(&o.builder); + const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const slice_alignment = slice_ty.abiAlignment(mod); - const param_types = [_]*llvm.Type{try o.lowerLlvmType(enum_type.tag_ty.toType())}; + const param_types = [_]*llvm.Type{(try o.lowerType(enum_type.tag_ty.toType())).toLlvm(&o.builder)}; const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type); @@ -9490,7 +9374,7 @@ pub const FuncGen = struct { o.addCommonFnAttributes(fn_val); var global = Builder.Global{ - .type = .void, + .type = try o.builder.fnType(.void, &.{}, .normal), .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; var function = Builder.Function{ @@ -9573,8 +9457,8 @@ pub const FuncGen = struct { // Function signature: fn (anyerror) bool - const ret_llvm_ty = try o.lowerLlvmType(Type.bool); - const anyerror_llvm_ty = try o.lowerLlvmType(Type.anyerror); + const ret_llvm_ty = (try o.lowerType(Type.bool)).toLlvm(&o.builder); + const anyerror_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); const param_types = [_]*llvm.Type{anyerror_llvm_ty}; const fn_type = llvm.functionType(ret_llvm_ty, ¶m_types, param_types.len, .False); @@ -9590,7 +9474,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const slice_ty = self.typeOfIndex(inst); - const slice_llvm_ty = try o.lowerLlvmType(slice_ty); + const slice_llvm_ty = (try o.lowerType(slice_ty)).toLlvm(&o.builder); const error_name_table_ptr = try self.getErrorNameTable(); const ptr_slice_llvm_ty = self.context.pointerType(0); @@ -9676,7 +9560,7 @@ pub const FuncGen = struct { accum_init: *llvm.Value, ) !*llvm.Value { const o = self.dg.object; - const llvm_usize_ty = try o.lowerLlvmType(Type.usize); + const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False); const llvm_result_ty = accum_init.typeOf(); @@ -9753,7 +9637,7 @@ pub const FuncGen = struct { .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildAddReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const neutral_value = scalar_llvm_ty.constReal(-0.0); return self.builder.buildFPAddReduce(neutral_value, operand); }, @@ -9762,7 +9646,7 @@ pub const FuncGen = struct { .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildMulReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerLlvmType(scalar_ty); + const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const neutral_value = scalar_llvm_ty.constReal(1.0); return self.builder.buildFPMulReduce(neutral_value, operand); }, @@ -9790,7 +9674,7 @@ pub const FuncGen = struct { else => unreachable, }; - const param_llvm_ty = try o.lowerLlvmType(scalar_ty); + const param_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty }; const libc_fn = try self.getLibcFunction(fn_name, ¶m_types, param_llvm_ty); const init_value = try o.lowerValue(.{ @@ -9813,7 +9697,7 @@ pub const FuncGen = struct { const result_ty = self.typeOfIndex(inst); const len = @as(usize, 
@intCast(result_ty.arrayLen(mod))); const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); - const llvm_result_ty = try o.lowerLlvmType(result_ty); + const llvm_result_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); switch (result_ty.zigTypeTag(mod)) { .Vector => { @@ -9901,7 +9785,7 @@ pub const FuncGen = struct { .Array => { assert(isByRef(result_ty, mod)); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); @@ -9944,7 +9828,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); - const union_llvm_ty = try o.lowerLlvmType(union_ty); + const union_llvm_ty = (try o.lowerType(union_ty)).toLlvm(&o.builder); const layout = union_ty.unionGetLayout(mod); const union_obj = mod.typeToUnion(union_ty).?; @@ -9986,7 +9870,7 @@ pub const FuncGen = struct { const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; - const field_llvm_ty = try o.lowerLlvmType(field.ty); + const field_llvm_ty = (try o.lowerType(field.ty)).toLlvm(&o.builder); const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); @@ -10009,7 +9893,7 @@ pub const FuncGen = struct { const fields: [1]*llvm.Type = .{payload}; break :t self.context.structType(&fields, fields.len, .False); } - const tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); + const tag_llvm_ty = (try o.lowerType(union_obj.tag_ty)).toLlvm(&o.builder); var fields: [3]*llvm.Type = undefined; var fields_len: c_uint = 2; if (layout.tag_align >= layout.payload_align) { @@ -10062,7 +9946,7 @@ pub const FuncGen = struct { index_type.constInt(@intFromBool(layout.tag_align < layout.payload_align), .False), }; const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); - const tag_llvm_ty = try o.lowerLlvmType(union_obj.tag_ty); + const tag_llvm_ty = (try o.lowerType(union_obj.tag_ty)).toLlvm(&o.builder); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); @@ -10144,7 +10028,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const llvm_dest_ty = try o.lowerLlvmType(inst_ty); + const llvm_dest_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, ""); } @@ -10278,7 +10162,7 @@ pub const FuncGen = struct { return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const payload_llvm_ty = try o.lowerLlvmType(payload_ty); + const payload_llvm_ty = (try o.lowerType(payload_ty)).toLlvm(&o.builder); const load_inst = fg.builder.buildLoad(payload_llvm_ty, payload_ptr, ""); load_inst.setAlignment(payload_alignment); return load_inst; @@ -10295,7 +10179,7 @@ pub const FuncGen = struct { non_null_bit: *llvm.Value, ) !?*llvm.Value { const o = self.dg.object; - const optional_llvm_ty = try o.lowerLlvmType(optional_ty); + const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); const non_null_field = 
self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); const mod = o.module; @@ -10350,13 +10234,13 @@ pub const FuncGen = struct { const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = self.context.intType(8); - const llvm_usize = try o.lowerLlvmType(Type.usize); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const llvm_index = llvm_usize.constInt(byte_offset, .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, ""); }, else => { - const struct_llvm_ty = try o.lowerPtrElemTy(struct_ty); + const struct_llvm_ty = (try o.lowerPtrElemTy(struct_ty)).toLlvm(&o.builder); if (llvmField(struct_ty, field_index, mod)) |llvm_field| { return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, ""); @@ -10376,7 +10260,7 @@ pub const FuncGen = struct { const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr; const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); - const union_llvm_ty = try o.lowerLlvmType(struct_ty); + const union_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); return union_field_ptr; }, @@ -10401,7 +10285,7 @@ pub const FuncGen = struct { ) !*llvm.Value { const o = fg.dg.object; const mod = o.module; - const pointee_llvm_ty = try o.lowerLlvmType(pointee_type); + const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); @@ -10434,7 +10318,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = try o.lowerLlvmType(elem_ty); + const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); @@ -10448,7 +10332,7 @@ pub const FuncGen = struct { if (isByRef(elem_ty, mod)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile); } - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); llvm_inst.setAlignment(ptr_alignment); llvm_inst.setVolatile(ptr_volatile); @@ -10463,7 +10347,7 @@ pub const FuncGen = struct { const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try o.lowerLlvmType(elem_ty); + const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); if (isByRef(elem_ty, mod)) { const result_align = elem_ty.abiAlignment(mod); @@ -10511,7 +10395,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { const index_u32 = 
self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = try o.lowerLlvmType(elem_ty); + const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); @@ -10951,14 +10835,14 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca } /// Convert a zig-address space to an llvm address space. -fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) c_uint { - for (llvmAddressSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm; +fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace { + for (llvmAddrSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm; unreachable; } -const AddressSpaceInfo = struct { +const AddrSpaceInfo = struct { zig: ?std.builtin.AddressSpace, - llvm: c_uint, + llvm: Builder.AddrSpace, non_integral: bool = false, size: ?u16 = null, abi: ?u16 = null, @@ -10966,49 +10850,49 @@ const AddressSpaceInfo = struct { idx: ?u16 = null, force_in_data_layout: bool = false, }; -fn llvmAddressSpaceInfo(target: std.Target) []const AddressSpaceInfo { +fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo { return switch (target.cpu.arch) { .x86, .x86_64 => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.default }, - .{ .zig = .gs, .llvm = llvm.address_space.x86.gs }, - .{ .zig = .fs, .llvm = llvm.address_space.x86.fs }, - .{ .zig = .ss, .llvm = llvm.address_space.x86.ss }, - .{ .zig = null, .llvm = llvm.address_space.x86.ptr32_sptr, .size = 32, .abi = 32, .force_in_data_layout = true }, - .{ .zig = null, .llvm = llvm.address_space.x86.ptr32_uptr, .size = 32, .abi = 32, .force_in_data_layout = true }, - .{ .zig = null, .llvm = llvm.address_space.x86.ptr64, .size = 64, .abi = 64, .force_in_data_layout = true }, + .{ .zig = .generic, .llvm = .default }, + .{ .zig = .gs, .llvm = Builder.AddrSpace.x86.gs }, + .{ .zig = .fs, .llvm = Builder.AddrSpace.x86.fs }, + .{ .zig = .ss, .llvm = Builder.AddrSpace.x86.ss }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_sptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_uptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr64, .size = 64, .abi = 64, .force_in_data_layout = true }, }, .nvptx, .nvptx64 => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.default }, - .{ .zig = .global, .llvm = llvm.address_space.nvptx.global }, - .{ .zig = .constant, .llvm = llvm.address_space.nvptx.constant }, - .{ .zig = .param, .llvm = llvm.address_space.nvptx.param }, - .{ .zig = .shared, .llvm = llvm.address_space.nvptx.shared }, - .{ .zig = .local, .llvm = llvm.address_space.nvptx.local }, + .{ .zig = .generic, .llvm = .default }, + .{ .zig = .global, .llvm = Builder.AddrSpace.nvptx.global }, + .{ .zig = .constant, .llvm = Builder.AddrSpace.nvptx.constant }, + .{ .zig = .param, .llvm = Builder.AddrSpace.nvptx.param }, + .{ .zig = .shared, .llvm = Builder.AddrSpace.nvptx.shared }, + .{ .zig = .local, .llvm = Builder.AddrSpace.nvptx.local }, }, .amdgcn => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.amdgpu.flat }, - .{ .zig = .global, .llvm = llvm.address_space.amdgpu.global }, - .{ .zig = .constant, .llvm = llvm.address_space.amdgpu.constant }, - .{ .zig = .shared, .llvm = 
llvm.address_space.amdgpu.local }, - .{ .zig = .local, .llvm = llvm.address_space.amdgpu.private }, + .{ .zig = .generic, .llvm = Builder.AddrSpace.amdgpu.flat }, + .{ .zig = .global, .llvm = Builder.AddrSpace.amdgpu.global }, + .{ .zig = .constant, .llvm = Builder.AddrSpace.amdgpu.constant }, + .{ .zig = .shared, .llvm = Builder.AddrSpace.amdgpu.local }, + .{ .zig = .local, .llvm = Builder.AddrSpace.amdgpu.private }, }, .avr => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.default }, - .{ .zig = .flash, .llvm = llvm.address_space.avr.flash }, - .{ .zig = .flash1, .llvm = llvm.address_space.avr.flash1 }, - .{ .zig = .flash2, .llvm = llvm.address_space.avr.flash2 }, - .{ .zig = .flash3, .llvm = llvm.address_space.avr.flash3 }, - .{ .zig = .flash4, .llvm = llvm.address_space.avr.flash4 }, - .{ .zig = .flash5, .llvm = llvm.address_space.avr.flash5 }, + .{ .zig = .generic, .llvm = .default }, + .{ .zig = .flash, .llvm = Builder.AddrSpace.avr.flash }, + .{ .zig = .flash1, .llvm = Builder.AddrSpace.avr.flash1 }, + .{ .zig = .flash2, .llvm = Builder.AddrSpace.avr.flash2 }, + .{ .zig = .flash3, .llvm = Builder.AddrSpace.avr.flash3 }, + .{ .zig = .flash4, .llvm = Builder.AddrSpace.avr.flash4 }, + .{ .zig = .flash5, .llvm = Builder.AddrSpace.avr.flash5 }, }, .wasm32, .wasm64 => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.default }, - .{ .zig = null, .llvm = llvm.address_space.wasm.variable, .non_integral = true }, - .{ .zig = null, .llvm = llvm.address_space.wasm.externref, .non_integral = true, .size = 8, .abi = 8 }, - .{ .zig = null, .llvm = llvm.address_space.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 }, + .{ .zig = .generic, .llvm = .default }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.variable, .non_integral = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.externref, .non_integral = true, .size = 8, .abi = 8 }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 }, }, else => &.{ - .{ .zig = .generic, .llvm = llvm.address_space.default }, + .{ .zig = .generic, .llvm = .default }, }, }; } @@ -11017,30 +10901,30 @@ fn llvmAddressSpaceInfo(target: std.Target) []const AddressSpaceInfo { /// different address, space and then cast back to the generic address space. /// For example, on GPUs local variable declarations must be generated into the local address space. /// This function returns the address space local values should be generated into. -fn llvmAllocaAddressSpace(target: std.Target) c_uint { +fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace { return switch (target.cpu.arch) { // On amdgcn, locals should be generated into the private address space. // To make Zig not impossible to use, these are then converted to addresses in the // generic address space and treates as regular pointers. This is the way that HIP also does it. - .amdgcn => llvm.address_space.amdgpu.private, - else => llvm.address_space.default, + .amdgcn => Builder.AddrSpace.amdgpu.private, + else => .default, }; } /// On some targets, global values that are in the generic address space must be generated into a /// different address space, and then cast back to the generic address space. -fn llvmDefaultGlobalAddressSpace(target: std.Target) c_uint { +fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace { return switch (target.cpu.arch) { // On amdgcn, globals must be explicitly allocated and uploaded so that the program can access // them. 
- .amdgcn => llvm.address_space.amdgpu.global, - else => llvm.address_space.default, + .amdgcn => Builder.AddrSpace.amdgpu.global, + else => .default, }; } /// Return the actual address space that a value should be stored in if its a global address space. /// When a value is placed in the resulting address space, it needs to be cast back into wanted_address_space. -fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) c_uint { +fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace { return switch (wanted_address_space) { .generic => llvmDefaultGlobalAddressSpace(target), else => |as| toLlvmAddressSpace(as, target), @@ -11170,161 +11054,129 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool { /// In order to support the C calling convention, some return types need to be lowered /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. -fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { +fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { const mod = o.module; const return_type = fn_info.return_type.toType(); if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (return_type.isError(mod)) { - return o.lowerLlvmType(Type.anyerror); - } else { - return o.context.voidType(); - } + return if (return_type.isError(mod)) Builder.Type.err_int else .void; } const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => { - if (isByRef(return_type, mod)) { - return o.context.voidType(); - } else { - return o.lowerLlvmType(return_type); - } - }, + .Unspecified, + .Inline, + => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type), .C => { switch (target.cpu.arch) { - .mips, .mipsel => return o.lowerLlvmType(return_type), + .mips, .mipsel => return o.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), else => return lowerSystemVFnRetTy(o, fn_info), }, .wasm32 => { if (isScalar(mod, return_type)) { - return o.lowerLlvmType(return_type); + return o.lowerType(return_type); } const classes = wasm_c_abi.classifyType(return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { - return o.context.voidType(); + return .void; } assert(classes[0] == .direct and classes[1] == .none); const scalar_type = wasm_c_abi.scalarType(return_type, mod); - const abi_size = scalar_type.abiSize(mod); - return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); + return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8)); }, .aarch64, .aarch64_be => { switch (aarch64_c_abi.classifyType(return_type, mod)) { - .memory => return o.context.voidType(), - .float_array => return o.lowerLlvmType(return_type), - .byval => return o.lowerLlvmType(return_type), - .integer => { - const bit_size = return_type.bitSize(mod); - return o.context.intType(@as(c_uint, @intCast(bit_size))); - }, - .double_integer => return o.context.intType(64).arrayType(2), + .memory => return .void, + .float_array => return o.lowerType(return_type), + .byval => return o.lowerType(return_type), + .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))), + .double_integer => 
return o.builder.arrayType(2, .i64), } }, .arm, .armeb => { switch (arm_c_abi.classifyType(return_type, mod, .ret)) { - .memory, .i64_array => return o.context.voidType(), - .i32_array => |len| if (len == 1) { - return o.context.intType(32); - } else { - return o.context.voidType(); - }, - .byval => return o.lowerLlvmType(return_type), + .memory, .i64_array => return .void, + .i32_array => |len| return if (len == 1) .i32 else .void, + .byval => return o.lowerType(return_type), } }, .riscv32, .riscv64 => { switch (riscv_c_abi.classifyType(return_type, mod)) { - .memory => return o.context.voidType(), + .memory => return .void, .integer => { - const bit_size = return_type.bitSize(mod); - return o.context.intType(@as(c_uint, @intCast(bit_size))); + return o.builder.intType(@intCast(return_type.bitSize(mod))); }, .double_integer => { - var llvm_types_buffer: [2]*llvm.Type = .{ - o.context.intType(64), - o.context.intType(64), - }; - return o.context.structType(&llvm_types_buffer, 2, .False); + return o.builder.structType(.normal, &.{ .i64, .i64 }); }, - .byval => return o.lowerLlvmType(return_type), + .byval => return o.lowerType(return_type), } }, // TODO investigate C ABI for other architectures - else => return o.lowerLlvmType(return_type), + else => return o.lowerType(return_type), } }, .Win64 => return lowerWin64FnRetTy(o, fn_info), .SysV => return lowerSystemVFnRetTy(o, fn_info), - .Stdcall => { - if (isScalar(mod, return_type)) { - return o.lowerLlvmType(return_type); - } else { - return o.context.voidType(); - } - }, - else => return o.lowerLlvmType(return_type), + .Stdcall => return if (isScalar(mod, return_type)) o.lowerType(return_type) else .void, + else => return o.lowerType(return_type), } } -fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { +fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { const mod = o.module; const return_type = fn_info.return_type.toType(); switch (x86_64_abi.classifyWindows(return_type, mod)) { .integer => { if (isScalar(mod, return_type)) { - return o.lowerLlvmType(return_type); + return o.lowerType(return_type); } else { - const abi_size = return_type.abiSize(mod); - return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); + return o.builder.intType(@intCast(return_type.abiSize(mod) * 8)); } }, - .win_i128 => return o.context.intType(64).vectorType(2), - .memory => return o.context.voidType(), - .sse => return o.lowerLlvmType(return_type), + .win_i128 => return o.builder.vectorType(.normal, 2, .i64), + .memory => return .void, + .sse => return o.lowerType(return_type), else => unreachable, } } -fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type { +fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { const mod = o.module; const return_type = fn_info.return_type.toType(); if (isScalar(mod, return_type)) { - return o.lowerLlvmType(return_type); + return o.lowerType(return_type); } const classes = x86_64_abi.classifySystemV(return_type, mod, .ret); - if (classes[0] == .memory) { - return o.context.voidType(); - } - var llvm_types_buffer: [8]*llvm.Type = undefined; - var llvm_types_index: u32 = 0; + if (classes[0] == .memory) return .void; + var types_index: u32 = 0; + var types_buffer: [8]Builder.Type = undefined; for (classes) |class| { switch (class) { .integer => { - llvm_types_buffer[llvm_types_index] = o.context.intType(64); - llvm_types_index += 1; + types_buffer[types_index] = .i64; + 
types_index += 1; }, .sse, .sseup => { - llvm_types_buffer[llvm_types_index] = o.context.doubleType(); - llvm_types_index += 1; + types_buffer[types_index] = .double; + types_index += 1; }, .float => { - llvm_types_buffer[llvm_types_index] = o.context.floatType(); - llvm_types_index += 1; + types_buffer[types_index] = .float; + types_index += 1; }, .float_combine => { - llvm_types_buffer[llvm_types_index] = o.context.floatType().vectorType(2); - llvm_types_index += 1; + types_buffer[types_index] = try o.builder.vectorType(.normal, 2, .float); + types_index += 1; }, .x87 => { - if (llvm_types_index != 0 or classes[2] != .none) { - return o.context.voidType(); - } - llvm_types_buffer[llvm_types_index] = o.context.x86_fp80Type(); - llvm_types_index += 1; + if (types_index != 0 or classes[2] != .none) return .void; + types_buffer[types_index] = .x86_fp80; + types_index += 1; }, .x87up => continue, .complex_x87 => { @@ -11336,10 +11188,9 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type } } if (classes[0] == .integer and classes[1] == .none) { - const abi_size = return_type.abiSize(mod); - return o.context.intType(@as(c_uint, @intCast(abi_size * 8))); + return o.builder.intType(@intCast(return_type.abiSize(mod) * 8)); } - return o.context.structType(&llvm_types_buffer, llvm_types_index, .False); + return o.builder.structType(.normal, types_buffer[0..types_index]); } const ParamTypeIterator = struct { @@ -11347,7 +11198,8 @@ const ParamTypeIterator = struct { fn_info: InternPool.Key.FuncType, zig_index: u32, llvm_index: u32, - llvm_types_len: u32, + types_len: u32, + types_buffer: [8]Builder.Type, llvm_types_buffer: [8]*llvm.Type, byval_attr: bool, @@ -11365,7 +11217,7 @@ const ParamTypeIterator = struct { i64_array: u8, }; - pub fn next(it: *ParamTypeIterator) ?Lowering { + pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering { if (it.zig_index >= it.fn_info.param_types.len) return null; const mod = it.object.module; const ip = &mod.intern_pool; @@ -11375,7 +11227,7 @@ const ParamTypeIterator = struct { } /// `airCall` uses this instead of `next` so that it can take into account variadic functions. 
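Because the parameter iterator can now intern types in the Builder, `next`, `nextCall`, `nextInner`, and `nextSystemV` all gain the `Allocator.Error` error set, so every caller has to thread a `try` through its loop. The caller-side shape this implies (an illustrative loop, not a hunk from this patch):

    var it = iterateParamTypes(o, fn_info);
    while (try it.next()) |lowering| {
        switch (lowering) {
            .byval => {}, // pass the value through unchanged
            .multiple_llvm_types => {}, // expanded from it.types_buffer[0..it.types_len]
            else => {},
        }
    }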
- pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) ?Lowering { + pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering { const mod = it.object.module; const ip = &mod.intern_pool; if (it.zig_index >= it.fn_info.param_types.len) { @@ -11389,7 +11241,7 @@ const ParamTypeIterator = struct { } } - fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering { + fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { const mod = it.object.module; const target = mod.getTarget(); @@ -11444,8 +11296,9 @@ const ParamTypeIterator = struct { .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, .integer => { - it.llvm_types_len = 1; - it.llvm_types_buffer[0] = it.object.context.intType(64); + it.types_len = 1; + it.types_buffer[0] = .i64; + it.llvm_types_buffer[0] = it.types_buffer[0].toLlvm(&it.object.builder); return .multiple_llvm_types; }, .double_integer => return Lowering{ .i64_array = 2 }, @@ -11539,7 +11392,7 @@ const ParamTypeIterator = struct { } } - fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering { + fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { const mod = it.object.module; const classes = x86_64_abi.classifySystemV(ty, mod, .arg); if (classes[0] == .memory) { @@ -11553,25 +11406,34 @@ const ParamTypeIterator = struct { it.llvm_index += 1; return .byval; } + var types_index: u32 = 0; + var types_buffer: [8]Builder.Type = undefined; var llvm_types_buffer: [8]*llvm.Type = undefined; - var llvm_types_index: u32 = 0; for (classes) |class| { switch (class) { .integer => { - llvm_types_buffer[llvm_types_index] = it.object.context.intType(64); - llvm_types_index += 1; + types_buffer[types_index] = .i64; + llvm_types_buffer[types_index] = + types_buffer[types_index].toLlvm(&it.object.builder); + types_index += 1; }, .sse, .sseup => { - llvm_types_buffer[llvm_types_index] = it.object.context.doubleType(); - llvm_types_index += 1; + types_buffer[types_index] = .double; + llvm_types_buffer[types_index] = + types_buffer[types_index].toLlvm(&it.object.builder); + types_index += 1; }, .float => { - llvm_types_buffer[llvm_types_index] = it.object.context.floatType(); - llvm_types_index += 1; + types_buffer[types_index] = .float; + llvm_types_buffer[types_index] = + types_buffer[types_index].toLlvm(&it.object.builder); + types_index += 1; }, .float_combine => { - llvm_types_buffer[llvm_types_index] = it.object.context.floatType().vectorType(2); - llvm_types_index += 1; + types_buffer[types_index] = try it.object.builder.vectorType(.normal, 2, .float); + llvm_types_buffer[types_index] = + types_buffer[types_index].toLlvm(&it.object.builder); + types_index += 1; }, .x87 => { it.zig_index += 1; @@ -11593,9 +11455,10 @@ const ParamTypeIterator = struct { it.llvm_index += 1; return .abi_sized_int; } + it.types_len = types_index; + it.types_buffer = types_buffer; it.llvm_types_buffer = llvm_types_buffer; - it.llvm_types_len = llvm_types_index; - it.llvm_index += llvm_types_index; + it.llvm_index += types_index; it.zig_index += 1; return .multiple_llvm_types; } @@ -11607,8 +11470,9 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp .fn_info = fn_info, .zig_index = 0, .llvm_index = 0, + .types_len = 0, + .types_buffer = undefined, .llvm_types_buffer = undefined, - .llvm_types_len = 0, .byval_attr = false, }; } @@ -11905,7 +11769,7 @@ fn buildAllocaInner( } builder.clearCurrentDebugLocation(); - break :blk 
builder.buildAllocaInAddressSpace(llvm_ty, address_space, ""); + break :blk builder.buildAllocaInAddressSpace(llvm_ty, @intFromEnum(address_space), ""); }; if (maybe_alignment) |alignment| { @@ -11914,7 +11778,7 @@ fn buildAllocaInner( // The pointer returned from this function should have the generic address space, // if this isn't the case then cast it to the generic address space. - if (address_space != llvm.address_space.default) { + if (address_space != .default) { return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), ""); } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 04caf2d412..891450165d 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -19,7 +19,7 @@ types: std.AutoArrayHashMapUnmanaged(String, Type) = .{}, next_unnamed_type: String = @enumFromInt(0), next_unique_type_id: std.AutoHashMapUnmanaged(String, u32) = .{}, type_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, -type_data: std.ArrayListUnmanaged(Type.Data) = .{}, +type_items: std.ArrayListUnmanaged(Type.Item) = .{}, type_extra: std.ArrayListUnmanaged(u32) = .{}, globals: std.AutoArrayHashMapUnmanaged(String, Global) = .{}, @@ -62,10 +62,10 @@ pub const String = enum(u32) { else @compileError("invalid format string: '" ++ fmt_str ++ "'"); if (need_quotes) try writer.writeByte('\"'); - for (slice) |c| switch (c) { + for (slice) |character| switch (character) { '\\' => try writer.writeAll("\\\\"), - ' '...'"' - 1, '"' + 1...'\\' - 1, '\\' + 1...'~' => try writer.writeByte(c), - else => try writer.print("\\{X:0>2}", .{c}), + ' '...'"' - 1, '"' + 1...'\\' - 1, '\\' + 1...'~' => try writer.writeByte(character), + else => try writer.print("\\{X:0>2}", .{character}), }; if (need_quotes) try writer.writeByte('\"'); } @@ -120,21 +120,25 @@ pub const Type = enum(u32) { none = std.math.maxInt(u32), _, - const Tag = enum(u4) { + pub const err_int = Type.i16; + + pub const Tag = enum(u4) { simple, function, + vararg_function, integer, pointer, target, vector, - vscale_vector, + scalable_vector, + small_array, array, structure, packed_structure, named_structure, }; - const Simple = enum { + pub const Simple = enum { void, half, bfloat, @@ -150,19 +154,55 @@ pub const Type = enum(u32) { metadata, }; - const NamedStructure = struct { - id: String, + pub const Function = struct { + ret: Type, + params_len: u32, + + pub const Kind = enum { normal, vararg }; + }; + + pub const Target = extern struct { + name: String, + types_len: u32, + ints_len: u32, + }; + + pub const Vector = extern struct { + len: u32, + child: Type, + + pub const Kind = enum { normal, scalable }; + }; + + pub const Array = extern struct { + len_lo: u32, + len_hi: u32, child: Type, + + fn len(self: Array) u64 { + return @as(u64, self.len_hi) << 32 | self.len_lo; + } + }; + + pub const Structure = struct { + fields_len: u32, + + pub const Kind = enum { normal, @"packed" }; }; - const Data = packed struct(u32) { + pub const NamedStructure = struct { + id: String, + body: Type, + }; + + pub const Item = packed struct(u32) { tag: Tag, data: ExtraIndex, }; - const ExtraIndex = u28; + pub const ExtraIndex = u28; - const FormatData = struct { + pub const FormatData = struct { type: Type, builder: *const Builder, }; @@ -174,27 +214,106 @@ pub const Type = enum(u32) { ) @TypeOf(writer).Error!void { assert(data.type != .none); if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name); - const type_data = data.builder.type_data.items[@intFromEnum(data.type)]; 
-        switch (type_data.tag) {
-            .integer => try writer.print("i{d}", .{type_data.data}),
+        const type_item = data.builder.type_items.items[@intFromEnum(data.type)];
+        switch (type_item.tag) {
+            .simple => unreachable,
+            .function, .vararg_function => {
+                const extra = data.builder.typeExtraDataTrail(Type.Function, type_item.data);
+                const params: []const Type =
+                    @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]);
+                if (!comptime std.mem.eql(u8, fmt_str, ">"))
+                    try writer.print("{%} ", .{extra.data.ret.fmt(data.builder)});
+                if (!comptime std.mem.eql(u8, fmt_str, "<")) {
+                    try writer.writeByte('(');
+                    for (params, 0..) |param, index| {
+                        if (index > 0) try writer.writeAll(", ");
+                        try writer.print("{%}", .{param.fmt(data.builder)});
+                    }
+                    switch (type_item.tag) {
+                        .function => {},
+                        .vararg_function => {
+                            if (params.len > 0) try writer.writeAll(", ");
+                            try writer.writeAll("...");
+                        },
+                        else => unreachable,
+                    }
+                    try writer.writeByte(')');
+                }
+            },
+            .integer => try writer.print("i{d}", .{type_item.data}),
+            .pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(type_item.data))}),
+            .target => {
+                const extra = data.builder.typeExtraDataTrail(Type.Target, type_item.data);
+                const types: []const Type =
+                    @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.types_len]);
+                const ints: []const u32 = @ptrCast(data.builder.type_extra.items[extra.end +
+                    extra.data.types_len ..][0..extra.data.ints_len]);
+                try writer.print(
+                    \\target({"}
+                , .{extra.data.name.fmt(data.builder)});
+                for (types) |ty| try writer.print(", {%}", .{ty.fmt(data.builder)});
+                for (ints) |int| try writer.print(", {d}", .{int});
+                try writer.writeByte(')');
+            },
+            .vector => {
+                const extra = data.builder.typeExtraData(Type.Vector, type_item.data);
+                try writer.print("<{d} x {%}>", .{ extra.len, extra.child.fmt(data.builder) });
+            },
+            .scalable_vector => {
+                const extra = data.builder.typeExtraData(Type.Vector, type_item.data);
+                try writer.print("<vscale x {d} x {%}>", .{ extra.len, extra.child.fmt(data.builder) });
+            },
+            .small_array => {
+                const extra = data.builder.typeExtraData(Type.Vector, type_item.data);
+                try writer.print("[{d} x {%}]", .{ extra.len, extra.child.fmt(data.builder) });
+            },
+            .array => {
+                const extra = data.builder.typeExtraData(Type.Array, type_item.data);
+                try writer.print("[{d} x {%}]", .{ extra.len(), extra.child.fmt(data.builder) });
+            },
+            .structure, .packed_structure => {
+                const extra = data.builder.typeExtraDataTrail(Type.Structure, type_item.data);
+                const fields: []const Type =
+                    @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]);
+                switch (type_item.tag) {
+                    .structure => {},
+                    .packed_structure => try writer.writeByte('<'),
+                    else => unreachable,
+                }
+                try writer.writeAll("{ ");
+                for (fields, 0..)
|field, index| { + if (index > 0) try writer.writeAll(", "); + try writer.print("{%}", .{field.fmt(data.builder)}); + } + try writer.writeAll(" }"); + switch (type_item.tag) { + .structure => {}, + .packed_structure => try writer.writeByte('>'), + else => unreachable, + } + }, .named_structure => { - const extra = data.builder.typeExtraData(NamedStructure, type_data.data); - if (comptime std.mem.eql(u8, fmt_str, "")) switch (extra.child) { + const extra = data.builder.typeExtraData(Type.NamedStructure, type_item.data); + if (comptime std.mem.eql(u8, fmt_str, "%")) try writer.print("%{}", .{ + extra.id.fmt(data.builder), + }) else switch (extra.body) { .none => try writer.writeAll("opaque"), else => try format(.{ - .type = extra.child, + .type = extra.body, .builder = data.builder, }, fmt_str, fmt_opts, writer), - } else if (comptime std.mem.eql(u8, fmt_str, "%")) try writer.print("%{}", .{ - extra.id.fmt(data.builder), - }) else @compileError("invalid format string: '" ++ fmt_str ++ "'"); + } }, - else => try writer.print("", .{@intFromEnum(data.type)}), } } pub fn fmt(self: Type, builder: *const Builder) std.fmt.Formatter(format) { return .{ .data = .{ .type = self, .builder = builder } }; } + + pub fn toLlvm(self: Type, builder: *const Builder) *llvm.Type { + assert(builder.useLibLlvm()); + return builder.llvm_types.items[@intFromEnum(self)]; + } }; pub const Linkage = enum { @@ -217,9 +336,7 @@ pub const Linkage = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.writeAll(@tagName(self)); - try writer.writeByte(' '); + if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } }; @@ -234,9 +351,7 @@ pub const Preemption = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.writeAll(@tagName(self)); - try writer.writeByte(' '); + if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } }; @@ -251,9 +366,7 @@ pub const Visibility = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.writeAll(@tagName(self)); - try writer.writeByte(' '); + if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } }; @@ -268,9 +381,7 @@ pub const DllStorageClass = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.writeAll(@tagName(self)); - try writer.writeByte(' '); + if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } }; @@ -288,13 +399,12 @@ pub const ThreadLocal = enum { writer: anytype, ) @TypeOf(writer).Error!void { if (self == .default) return; - try writer.writeAll("thread_local"); + try writer.writeAll(" thread_local"); if (self != .generaldynamic) { try writer.writeByte('('); try writer.writeAll(@tagName(self)); try writer.writeByte(')'); } - try writer.writeByte(' '); } }; @@ -309,9 +419,7 @@ pub const UnnamedAddr = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.writeAll(@tagName(self)); - try writer.writeByte(' '); + if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } }; @@ -319,14 +427,82 @@ pub const AddrSpace = enum(u24) { default, _, + // See llvm/lib/Target/X86/X86.h + pub const x86 = struct { + pub const gs: AddrSpace = @enumFromInt(256); + pub const fs: AddrSpace = @enumFromInt(257); + pub const ss: AddrSpace = @enumFromInt(258); + + pub const 
ptr32_sptr: AddrSpace = @enumFromInt(270); + pub const ptr32_uptr: AddrSpace = @enumFromInt(271); + pub const ptr64: AddrSpace = @enumFromInt(272); + }; + pub const x86_64 = x86; + + // See llvm/lib/Target/AVR/AVR.h + pub const avr = struct { + pub const flash: AddrSpace = @enumFromInt(1); + pub const flash1: AddrSpace = @enumFromInt(2); + pub const flash2: AddrSpace = @enumFromInt(3); + pub const flash3: AddrSpace = @enumFromInt(4); + pub const flash4: AddrSpace = @enumFromInt(5); + pub const flash5: AddrSpace = @enumFromInt(6); + }; + + // See llvm/lib/Target/NVPTX/NVPTX.h + pub const nvptx = struct { + pub const generic: AddrSpace = @enumFromInt(0); + pub const global: AddrSpace = @enumFromInt(1); + pub const constant: AddrSpace = @enumFromInt(2); + pub const shared: AddrSpace = @enumFromInt(3); + pub const param: AddrSpace = @enumFromInt(4); + pub const local: AddrSpace = @enumFromInt(5); + }; + + // See llvm/lib/Target/AMDGPU/AMDGPU.h + pub const amdgpu = struct { + pub const flat: AddrSpace = @enumFromInt(0); + pub const global: AddrSpace = @enumFromInt(1); + pub const region: AddrSpace = @enumFromInt(2); + pub const local: AddrSpace = @enumFromInt(3); + pub const constant: AddrSpace = @enumFromInt(4); + pub const private: AddrSpace = @enumFromInt(5); + pub const constant_32bit: AddrSpace = @enumFromInt(6); + pub const buffer_fat_pointer: AddrSpace = @enumFromInt(7); + pub const param_d: AddrSpace = @enumFromInt(6); + pub const param_i: AddrSpace = @enumFromInt(7); + pub const constant_buffer_0: AddrSpace = @enumFromInt(8); + pub const constant_buffer_1: AddrSpace = @enumFromInt(9); + pub const constant_buffer_2: AddrSpace = @enumFromInt(10); + pub const constant_buffer_3: AddrSpace = @enumFromInt(11); + pub const constant_buffer_4: AddrSpace = @enumFromInt(12); + pub const constant_buffer_5: AddrSpace = @enumFromInt(13); + pub const constant_buffer_6: AddrSpace = @enumFromInt(14); + pub const constant_buffer_7: AddrSpace = @enumFromInt(15); + pub const constant_buffer_8: AddrSpace = @enumFromInt(16); + pub const constant_buffer_9: AddrSpace = @enumFromInt(17); + pub const constant_buffer_10: AddrSpace = @enumFromInt(18); + pub const constant_buffer_11: AddrSpace = @enumFromInt(19); + pub const constant_buffer_12: AddrSpace = @enumFromInt(20); + pub const constant_buffer_13: AddrSpace = @enumFromInt(21); + pub const constant_buffer_14: AddrSpace = @enumFromInt(22); + pub const constant_buffer_15: AddrSpace = @enumFromInt(23); + }; + + // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h + pub const wasm = struct { + pub const variable: AddrSpace = @enumFromInt(1); + pub const externref: AddrSpace = @enumFromInt(10); + pub const funcref: AddrSpace = @enumFromInt(20); + }; + pub fn format( self: AddrSpace, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self == .default) return; - try writer.print("addrspace({d}) ", .{@intFromEnum(self)}); + if (self != .default) try writer.print(" addrspace({d})", .{@intFromEnum(self)}); } }; @@ -341,8 +517,8 @@ pub const ExternallyInitialized = enum { writer: anytype, ) @TypeOf(writer).Error!void { if (self == .default) return; - try writer.writeAll(@tagName(self)); try writer.writeByte(' '); + try writer.writeAll(@tagName(self)); } }; @@ -399,6 +575,7 @@ pub const Global = struct { } pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + assert(builder.useLibLlvm()); return builder.llvm_globals.items[@intFromEnum(self)]; } @@ -517,7 +694,7 @@ pub fn init(self: 
*Builder) Allocator.Error!void { { const static_len = @typeInfo(Type).Enum.fields.len - 1; try self.type_map.ensureTotalCapacity(self.gpa, static_len); - try self.type_data.ensureTotalCapacity(self.gpa, static_len); + try self.type_items.ensureTotalCapacity(self.gpa, static_len); if (self.useLibLlvm()) try self.llvm_types.ensureTotalCapacity(self.gpa, static_len); inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| { const result = self.typeNoExtraAssumeCapacity(.{ @@ -532,7 +709,7 @@ pub fn init(self: *Builder) Allocator.Error!void { inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); inline for (.{0}) |addr_space| - assert(self.pointerTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); + assert(self.ptrTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); } } @@ -547,7 +724,7 @@ pub fn deinit(self: *Builder) void { self.types.deinit(self.gpa); self.next_unique_type_id.deinit(self.gpa); self.type_map.deinit(self.gpa); - self.type_data.deinit(self.gpa); + self.type_items.deinit(self.gpa); self.type_extra.deinit(self.gpa); self.globals.deinit(self.gpa); @@ -600,25 +777,95 @@ pub fn fmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: return String.fromIndex(gop.index); } +pub fn fnType( + self: *Builder, + ret: Type, + params: []const Type, + kind: Type.Function.Kind, +) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, Type.Function, params.len); + return switch (kind) { + inline else => |comptime_kind| self.fnTypeAssumeCapacity(ret, params, comptime_kind), + }; +} + +pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, null, 0); + return self.intTypeAssumeCapacity(bits); +} + +pub fn ptrType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, null, 0); + return self.ptrTypeAssumeCapacity(addr_space); +} + +pub fn vectorType( + self: *Builder, + kind: Type.Vector.Kind, + len: u32, + child: Type, +) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, Type.Vector, 0); + return switch (kind) { + inline else => |comptime_kind| self.vectorTypeAssumeCapacity(comptime_kind, len, child), + }; +} + +pub fn arrayType(self: *Builder, len: u64, child: Type) Allocator.Error!Type { + comptime assert(@sizeOf(Type.Array) >= @sizeOf(Type.Vector)); + try self.ensureUnusedCapacityTypes(1, Type.Array, 0); + return self.arrayTypeAssumeCapacity(len, child); +} + +pub fn structType( + self: *Builder, + kind: Type.Structure.Kind, + fields: []const Type, +) Allocator.Error!Type { + try self.ensureUnusedCapacityTypes(1, Type.Structure, fields.len); + return switch (kind) { + inline else => |comptime_kind| self.structTypeAssumeCapacity(comptime_kind, fields), + }; +} + pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type { try self.string_map.ensureUnusedCapacity(self.gpa, 1); - try self.string_bytes.ensureUnusedCapacity(self.gpa, name.toSlice(self).?.len + + if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)})); try self.string_indices.ensureUnusedCapacity(self.gpa, 1); try self.types.ensureUnusedCapacity(self.gpa, 1); try self.next_unique_type_id.ensureUnusedCapacity(self.gpa, 1); - try self.ensureUnusedCapacityTypes(1, Type.NamedStructure); + try self.ensureUnusedCapacityTypes(1, Type.NamedStructure, 0); return 
self.opaqueTypeAssumeCapacity(name); } -pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, null); - return self.intTypeAssumeCapacity(bits); -} - -pub fn pointerType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, null); - return self.pointerTypeAssumeCapacity(addr_space); +pub fn namedTypeSetBody( + self: *Builder, + named_type: Type, + body_type: Type, +) if (build_options.have_llvm) Allocator.Error!void else void { + const named_item = self.type_items.items[@intFromEnum(named_type)]; + self.type_extra.items[named_item.data + std.meta.fieldIndex(Type.NamedStructure, "body").?] = + @intFromEnum(body_type); + if (self.useLibLlvm()) { + const body_item = self.type_items.items[@intFromEnum(body_type)]; + const body_extra = self.typeExtraDataTrail(Type.Structure, body_item.data); + const body_fields: []const Type = + @ptrCast(self.type_extra.items[body_extra.end..][0..body_extra.data.fields_len]); + const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len); + defer self.gpa.free(llvm_fields); + for (llvm_fields, body_fields) |*llvm_field, body_field| + llvm_field.* = self.llvm_types.items[@intFromEnum(body_field)]; + self.llvm_types.items[@intFromEnum(named_type)].structSetBody( + llvm_fields.ptr, + @intCast(llvm_fields.len), + switch (body_item.tag) { + .structure => .False, + .packed_structure => .True, + else => unreachable, + }, + ); + } } pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { @@ -667,6 +914,7 @@ fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.ExtraIndex { inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { const value = @field(extra, field.name); self.type_extra.appendAssumeCapacity(switch (field.type) { + u32 => value, String, Type => @intFromEnum(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }); @@ -683,6 +931,7 @@ fn typeExtraDataTrail( const fields = @typeInfo(T).Struct.fields; inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, data| @field(result, field.name) = switch (field.type) { + u32 => data, String, Type => @enumFromInt(data), else => @compileError("bad field type: " ++ @typeName(field.type)), }; @@ -693,14 +942,254 @@ fn typeExtraData(self: *const Builder, comptime T: type, index: Type.ExtraIndex) return self.typeExtraDataTrail(T, index).data; } +fn fnTypeAssumeCapacity( + self: *Builder, + ret: Type, + params: []const Type, + comptime kind: Type.Function.Kind, +) if (build_options.have_llvm) Allocator.Error!Type else Type { + const tag: Type.Tag = switch (kind) { + .normal => .function, + .vararg => .vararg_function, + }; + const Key = struct { ret: Type, params: []const Type }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag))); + hasher.update(std.mem.asBytes(&key.ret)); + hasher.update(std.mem.sliceAsBytes(key.params)); + return @truncate(hasher.final()); + } + pub fn eql(ctx: @This(), lhs: Key, _: void, rhs_index: usize) bool { + const rhs_data = ctx.builder.type_items.items[rhs_index]; + const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Function, rhs_data.data); + const rhs_params: []const Type = + @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.params_len]); + return rhs_data.tag == tag and lhs.ret == rhs_extra.data.ret and + std.mem.eql(Type, lhs.params, 
rhs_params);
+        }
+    };
+    const data = Key{ .ret = ret, .params = params };
+    const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+    if (!gop.found_existing) {
+        gop.key_ptr.* = {};
+        gop.value_ptr.* = {};
+        self.type_items.appendAssumeCapacity(.{
+            .tag = tag,
+            .data = self.addTypeExtraAssumeCapacity(Type.Function{
+                .ret = ret,
+                .params_len = @intCast(params.len),
+            }),
+        });
+        self.type_extra.appendSliceAssumeCapacity(@ptrCast(params));
+        if (self.useLibLlvm()) {
+            const llvm_params = try self.gpa.alloc(*llvm.Type, params.len);
+            defer self.gpa.free(llvm_params);
+            for (llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(self);
+            self.llvm_types.appendAssumeCapacity(llvm.functionType(
+                ret.toLlvm(self),
+                llvm_params.ptr,
+                @intCast(llvm_params.len),
+                switch (kind) {
+                    .normal => .False,
+                    .vararg => .True,
+                },
+            ));
+        }
+    }
+    return @enumFromInt(gop.index);
+}
+
+fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type {
+    const result = self.typeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits });
+    if (self.useLibLlvm() and result.new)
+        self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits));
+    return result.type;
+}
+
+fn ptrTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type {
+    const result = self.typeNoExtraAssumeCapacity(.{
+        .tag = .pointer,
+        .data = @intFromEnum(addr_space),
+    });
+    if (self.useLibLlvm() and result.new)
+        self.llvm_types.appendAssumeCapacity(self.llvm_context.pointerType(@intFromEnum(addr_space)));
+    return result.type;
+}
+
+fn vectorTypeAssumeCapacity(
+    self: *Builder,
+    comptime kind: Type.Vector.Kind,
+    len: u32,
+    child: Type,
+) Type {
+    const tag: Type.Tag = switch (kind) {
+        .normal => .vector,
+        .scalable => .scalable_vector,
+    };
+    const Adapter = struct {
+        builder: *const Builder,
+        pub fn hash(_: @This(), key: Type.Vector) u32 {
+            return @truncate(std.hash.Wyhash.hash(
+                comptime std.hash.uint32(@intFromEnum(tag)),
+                std.mem.asBytes(&key),
+            ));
+        }
+        pub fn eql(ctx: @This(), lhs: Type.Vector, _: void, rhs_index: usize) bool {
+            const rhs_data = ctx.builder.type_items.items[rhs_index];
+            return rhs_data.tag == tag and
+                std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Vector, rhs_data.data));
+        }
+    };
+    const data = Type.Vector{ .len = len, .child = child };
+    const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+    if (!gop.found_existing) {
+        gop.key_ptr.* = {};
+        gop.value_ptr.* = {};
+        self.type_items.appendAssumeCapacity(.{
+            .tag = tag,
+            .data = self.addTypeExtraAssumeCapacity(data),
+        });
+        if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity(switch (kind) {
+            .normal => &llvm.Type.vectorType,
+            .scalable => &llvm.Type.scalableVectorType,
+        }(child.toLlvm(self), @intCast(len)));
+    }
+    return @enumFromInt(gop.index);
+}
+
+fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
+    if (std.math.cast(u32, len)) |small_len| {
+        const Adapter = struct {
+            builder: *const Builder,
+            pub fn hash(_: @This(), key: Type.Vector) u32 {
+                return @truncate(std.hash.Wyhash.hash(
+                    comptime std.hash.uint32(@intFromEnum(Type.Tag.small_array)),
+                    std.mem.asBytes(&key),
+                ));
+            }
+            pub fn eql(ctx: @This(), lhs: Type.Vector, _: void, rhs_index: usize) bool {
+                const rhs_data = ctx.builder.type_items.items[rhs_index];
+                return rhs_data.tag == .small_array and
+                    std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Vector, rhs_data.data));
+            }
+        };
+        const data = Type.Vector{ .len = small_len, .child = child };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_items.appendAssumeCapacity(.{ + .tag = .small_array, + .data = self.addTypeExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + child.toLlvm(self).arrayType(@intCast(len)), + ); + } + return @enumFromInt(gop.index); + } else { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Type.Array) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Type.Tag.array)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs: Type.Array, _: void, rhs_index: usize) bool { + const rhs_data = ctx.builder.type_items.items[rhs_index]; + return rhs_data.tag == .array and + std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Array, rhs_data.data)); + } + }; + const data = Type.Array{ + .len_lo = @truncate(len), + .len_hi = @intCast(len >> 32), + .child = child, + }; + const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_items.appendAssumeCapacity(.{ + .tag = .array, + .data = self.addTypeExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + child.toLlvm(self).arrayType(@intCast(len)), + ); + } + return @enumFromInt(gop.index); + } +} + +fn structTypeAssumeCapacity( + self: *Builder, + comptime kind: Type.Structure.Kind, + fields: []const Type, +) if (build_options.have_llvm) Allocator.Error!Type else Type { + const tag: Type.Tag = switch (kind) { + .normal => .structure, + .@"packed" => .packed_structure, + }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: []const Type) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(tag)), + std.mem.sliceAsBytes(key), + )); + } + pub fn eql(ctx: @This(), lhs: []const Type, _: void, rhs_index: usize) bool { + const rhs_data = ctx.builder.type_items.items[rhs_index]; + const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Structure, rhs_data.data); + const rhs_fields: []const Type = + @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.fields_len]); + return rhs_data.tag == tag and std.mem.eql(Type, lhs, rhs_fields); + } + }; + const gop = self.type_map.getOrPutAssumeCapacityAdapted(fields, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.type_items.appendAssumeCapacity(.{ + .tag = tag, + .data = self.addTypeExtraAssumeCapacity(Type.Structure{ + .fields_len = @intCast(fields.len), + }), + }); + self.type_extra.appendSliceAssumeCapacity(@ptrCast(fields)); + if (self.useLibLlvm()) { + const llvm_fields = try self.gpa.alloc(*llvm.Type, fields.len); + defer self.gpa.free(llvm_fields); + for (llvm_fields, fields) |*llvm_field, field| + llvm_field.* = self.llvm_types.items[@intFromEnum(field)]; + self.llvm_types.appendAssumeCapacity(self.llvm_context.structType( + llvm_fields.ptr, + @intCast(llvm_fields.len), + switch (kind) { + .normal => .False, + .@"packed" => .True, + }, + )); + } + } + return @enumFromInt(gop.index); +} + fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { const Adapter = struct { builder: *const Builder, pub fn hash(_: @This(), key: String) u32 { - return std.hash.uint32(@intFromEnum(key)); + return 
@truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Type.Tag.named_structure)), + std.mem.asBytes(&key), + )); } pub fn eql(ctx: @This(), lhs: String, _: void, rhs_index: usize) bool { - const rhs_data = ctx.builder.type_data.items[rhs_index]; + const rhs_data = ctx.builder.type_items.items[rhs_index]; return rhs_data.tag == .named_structure and lhs == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id; } @@ -718,11 +1207,11 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { assert(!gop.found_existing); gop.key_ptr.* = {}; gop.value_ptr.* = {}; - self.type_data.appendAssumeCapacity(.{ + self.type_items.appendAssumeCapacity(.{ .tag = .named_structure, .data = self.addTypeExtraAssumeCapacity(Type.NamedStructure{ .id = id, - .child = .none, + .body = .none, }), }); const result: Type = @enumFromInt(gop.index); @@ -740,53 +1229,49 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { } } -fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { - const result = self.typeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); - if (self.useLibLlvm() and result.new) - self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits)); - return result.type; -} - -fn pointerTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type { - const result = self.typeNoExtraAssumeCapacity(.{ .tag = .pointer, .data = @intFromEnum(addr_space) }); - if (self.useLibLlvm() and result.new) - self.llvm_types.appendAssumeCapacity(self.llvm_context.pointerType(@intFromEnum(addr_space))); - return result.type; -} - -fn ensureUnusedCapacityTypes(self: *Builder, count: usize, comptime Extra: ?type) Allocator.Error!void { +fn ensureUnusedCapacityTypes( + self: *Builder, + count: usize, + comptime Extra: ?type, + trail_len: usize, +) Allocator.Error!void { try self.type_map.ensureUnusedCapacity(self.gpa, count); - try self.type_data.ensureUnusedCapacity(self.gpa, count); - if (Extra) |E| - try self.type_extra.ensureUnusedCapacity(self.gpa, count * @typeInfo(E).Struct.fields.len); + try self.type_items.ensureUnusedCapacity(self.gpa, count); + if (Extra) |E| try self.type_extra.ensureUnusedCapacity( + self.gpa, + count * (@typeInfo(E).Struct.fields.len + trail_len), + ); if (self.useLibLlvm()) try self.llvm_types.ensureUnusedCapacity(self.gpa, count); } -fn typeNoExtraAssumeCapacity(self: *Builder, data: Type.Data) struct { new: bool, type: Type } { +fn typeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } { const Adapter = struct { builder: *const Builder, - pub fn hash(_: @This(), key: Type.Data) u32 { - return std.hash.uint32(@bitCast(key)); + pub fn hash(_: @This(), key: Type.Item) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Type.Tag.simple)), + std.mem.asBytes(&key), + )); } - pub fn eql(ctx: @This(), lhs: Type.Data, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs: Type.Item, _: void, rhs_index: usize) bool { const lhs_bits: u32 = @bitCast(lhs); - const rhs_bits: u32 = @bitCast(ctx.builder.type_data.items[rhs_index]); + const rhs_bits: u32 = @bitCast(ctx.builder.type_items.items[rhs_index]); return lhs_bits == rhs_bits; } }; - const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + const gop = self.type_map.getOrPutAssumeCapacityAdapted(item, Adapter{ .builder = self }); if (!gop.found_existing) { gop.key_ptr.* = {}; gop.value_ptr.* = {}; - self.type_data.appendAssumeCapacity(data); + 
self.type_items.appendAssumeCapacity(item); } return .{ .new = !gop.found_existing, .type = @enumFromInt(gop.index) }; } fn isValidIdentifier(id: []const u8) bool { - for (id, 0..) |c, i| switch (c) { + for (id, 0..) |character, index| switch (character) { '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {}, - '0'...'9' => if (i == 0) return false, + '0'...'9' => if (index == 0) return false, else => return false, }; return true; @@ -815,7 +1300,7 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { for (self.objects.items) |object| { const global = self.globals.entries.get(@intFromEnum(object.global)); try writer.print( - \\@{} = {}{}{}{}{}{}{}{}{s} {}{,} + \\@{} ={}{}{}{}{}{}{}{} {s} {%}{,} \\ , .{ global.key.fmt(self), @@ -836,8 +1321,8 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { for (self.functions.items) |function| { const global = self.globals.entries.get(@intFromEnum(function.global)); try writer.print( - \\{s} {}{}{}{}void @{}() {}{}{{ - \\ ret void + \\{s} {}{}{}{}{<}@{}{>} {}{}{{ + \\ ret {%} \\}} \\ , .{ @@ -846,9 +1331,15 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { global.value.preemption, global.value.visibility, global.value.dll_storage_class, + global.value.type.fmt(self), global.key.fmt(self), + global.value.type.fmt(self), global.value.unnamed_addr, global.value.alignment, + self.typeExtraData( + Type.Function, + self.type_items.items[@intFromEnum(global.value.type)].data, + ).ret.fmt(self), }); } try writer.writeByte('\n'); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index c89f9ee2d5..5d04ec930b 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -317,6 +317,9 @@ pub const Type = opaque { pub const vectorType = LLVMVectorType; extern fn LLVMVectorType(ElementType: *Type, ElementCount: c_uint) *Type; + pub const scalableVectorType = LLVMScalableVectorType; + extern fn LLVMScalableVectorType(ElementType: *Type, ElementCount: c_uint) *Type; + pub const structSetBody = LLVMStructSetBody; extern fn LLVMStructSetBody( StructTy: *Type, @@ -1615,9 +1618,9 @@ pub const address_space = struct { // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h pub const wasm = struct { - pub const variable = 1; - pub const externref = 10; - pub const funcref = 20; + pub const variable: c_uint = 1; + pub const externref: c_uint = 10; + pub const funcref: c_uint = 20; }; }; -- cgit v1.2.3 From 65fd401c063ed5214fd6d38b04278571df24f962 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 8 Jul 2023 07:02:53 -0400 Subject: llvm: remove more usages of `llvm.Type` --- src/codegen/llvm.zig | 808 +++++++++++++++++++++---------------------- src/codegen/llvm/Builder.zig | 40 ++- 2 files changed, 415 insertions(+), 433 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c74f6021db..e7aaa05864 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -569,7 +569,7 @@ pub const Object = struct { /// Therefore, this table keeps track of the mapping. decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Global.Index), /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. - named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), + named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Function.Index), /// Maps Zig types to LLVM types. The table memory is backed by the GPA of /// the compiler. 
/// TODO when InternPool garbage collection is implemented, this map needs @@ -1210,7 +1210,7 @@ pub const Object = struct { if (isByRef(param_ty, mod)) { const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = param.typeOf(); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); args.appendAssumeCapacity(arg_ptr); @@ -1267,12 +1267,12 @@ pub const Object = struct { const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = o.context.intType(abi_size * 8); + const int_llvm_ty = (try o.builder.intType(@intCast(abi_size * 8))).toLlvm(&o.builder); const alignment = @max( param_ty.abiAlignment(mod), o.target_data.abiAlignmentOfType(int_llvm_ty), ); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); @@ -1317,13 +1317,13 @@ pub const Object = struct { }, .multiple_llvm_types => { assert(!it.byval_attr); - const field_types = it.llvm_types_buffer[0..it.types_len]; + const field_types = it.types_buffer[0..it.types_len]; const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param_alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); - const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); - for (field_types, 0..) 
|_, field_i_usize| { + const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target); + const llvm_ty = (try o.builder.structType(.normal, field_types)).toLlvm(&o.builder); + for (0..field_types.len) |field_i_usize| { const field_i = @as(c_uint, @intCast(field_i_usize)); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1344,7 +1344,7 @@ pub const Object = struct { assert(!it.byval_attr); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const casted = builder.buildBitCast(param, o.context.halfType(), ""); + const casted = builder.buildBitCast(param, Builder.Type.half.toLlvm(&o.builder), ""); try args.ensureUnusedCapacity(1); args.appendAssumeCapacity(casted); }, @@ -1355,7 +1355,7 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty, mod)) { @@ -1373,7 +1373,7 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty, mod)) { @@ -3563,7 +3563,7 @@ pub const Object = struct { const bigint = int_val.toBigInt(&bigint_space, mod); const int_info = tv.ty.intInfo(mod); - const llvm_type = o.context.intType(int_info.bits); + const llvm_type = (try o.builder.intType(@intCast(int_info.bits))).toLlvm(&o.builder); const unsigned_val = v: { if (bigint.limbs.len == 1) { @@ -3587,26 +3587,26 @@ pub const Object = struct { switch (tv.ty.floatBits(target)) { 16 => { const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); - const llvm_i16 = o.context.intType(16); + const llvm_i16 = Builder.Type.i16.toLlvm(&o.builder); const int = llvm_i16.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 32 => { const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod))); - const llvm_i32 = o.context.intType(32); + const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); const int = llvm_i32.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 64 => { const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod))); - const llvm_i64 = o.context.intType(64); + const llvm_i64 = Builder.Type.i64.toLlvm(&o.builder); const int = llvm_i64.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 80 => { const float = tv.val.toFloat(f80, mod); const repr = std.math.break_f80(float); - const llvm_i80 = o.context.intType(80); + const llvm_i80 = Builder.Type.i80.toLlvm(&o.builder); var x = llvm_i80.constInt(repr.exp, .False); x = x.constShl(llvm_i80.constInt(64, .False)); x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); @@ -3623,7 +3623,7 @@ pub const Object = struct { if (native_endian == .Big) { std.mem.swap(u64, &buf[0], &buf[1]); } - const int = o.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + const int = Builder.Type.i128.toLlvm(&o.builder).constIntOfArbitraryPrecision(buf.len, &buf); return int.constBitCast(llvm_ty); }, else => unreachable, @@ -3660,7 +3660,7 @@ pub const Object = struct { comptime assert(optional_layout_version == 3); const payload_ty = tv.ty.optionalChild(mod); - const llvm_i8 = 
o.context.intType(8); + const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); const non_null_bit = switch (opt.val) { .none => llvm_i8.constNull(), else => llvm_i8.constInt(1, .False), @@ -3761,7 +3761,7 @@ pub const Object = struct { const elem_ty = vector_type.child.toType(); const llvm_elems = try gpa.alloc(*llvm.Value, vector_type.len); defer gpa.free(llvm_elems); - const llvm_i8 = o.context.intType(8); + const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); for (llvm_elems, 0..) |*llvm_elem, i| { llvm_elem.* = switch (aggregate.storage) { .bytes => |bytes| llvm_i8.constInt(bytes[i], .False), @@ -3802,7 +3802,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3824,7 +3824,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3850,7 +3850,7 @@ pub const Object = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits))); + const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -3863,7 +3863,7 @@ pub const Object = struct { .val = try tv.val.fieldValue(mod, i), }); const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = o.context.intType(ty_bit_size); + const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else @@ -3899,7 +3899,7 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); @@ -3921,7 +3921,7 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } } @@ -3969,7 +3969,7 @@ pub const Object = struct { return llvm_union_ty.constNull(); const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod))); - const small_int_ty = o.context.intType(ty_bit_size); + const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else @@ -3985,7 +3985,7 @@ pub const Object = struct { const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p o.context.intType(8).arrayType(padding_len).getUndef(); + break :p Builder.Type.i8.toLlvm(&o.builder).arrayType(padding_len).getUndef(); } const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); need_unnamed = need_unnamed or o.isUnnamedType(field_ty, field); @@ -3995,7 +3995,7 @@ pub const Object = struct { } const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); const fields: [2]*llvm.Value = .{ - field, o.context.intType(8).arrayType(padding_len).getUndef(), + field, Builder.Type.i8.toLlvm(&o.builder).arrayType(padding_len).getUndef(), }; break :p o.context.constStruct(&fields, fields.len, .True); }; @@ -4020,7 +4020,7 @@ pub const Object = struct { fields = .{ payload, llvm_tag_value, undefined }; } if (layout.padding != 0) { - fields[2] = o.context.intType(8).arrayType(layout.padding).getUndef(); + fields[2] = Builder.Type.i8.toLlvm(&o.builder).arrayType(layout.padding).getUndef(); fields_len = 3; } if (need_unnamed) { @@ -4033,25 +4033,25 @@ pub const Object = struct { } } - fn lowerIntAsPtr(o: *Object, val: Value) Error!*llvm.Value { + fn lowerIntAsPtr(o: *Object, val: Value) Allocator.Error!*llvm.Value { const mod = o.module; switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => return o.context.pointerType(0).getUndef(), .int => { var bigint_space: Value.BigIntSpace = undefined; const bigint = val.toBigInt(&bigint_space, mod); - const llvm_int = lowerBigInt(o, Type.usize, bigint); + const llvm_int = try lowerBigInt(o, Type.usize, bigint); return llvm_int.constIntToPtr(o.context.pointerType(0)); }, else => unreachable, } } - fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { + fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) Allocator.Error!*llvm.Value { const mod = o.module; const int_info = ty.intInfo(mod); assert(int_info.bits != 0); - const llvm_type = o.context.intType(int_info.bits); + const llvm_type = (try o.builder.intType(@intCast(int_info.bits))).toLlvm(&o.builder); const unsigned_val = v: { if (bigint.limbs.len == 1) { @@ -4090,7 +4090,6 @@ pub const Object = struct { fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = o.module; - const target = mod.getTarget(); return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { .decl => |decl| 
o.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| o.lowerParentPtrDecl(ptr_val, mut_decl.decl), @@ -4107,7 +4106,7 @@ pub const Object = struct { } const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = o.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(payload_offset, .False), @@ -4128,7 +4127,7 @@ pub const Object = struct { return parent_llvm_ptr; } - const llvm_u32 = o.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(0, .False), @@ -4153,7 +4152,7 @@ pub const Object = struct { const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); const field_index = @as(u32, @intCast(field_ptr.index)); - const llvm_u32 = o.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); switch (parent_ty.zigTypeTag(mod)) { .Union => { if (parent_ty.containerLayout(mod) == .Packed) { @@ -4180,7 +4179,7 @@ pub const Object = struct { .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = o.context.intType(target.ptrBitWidth()); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { @@ -4438,6 +4437,51 @@ pub const Object = struct { llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder)); } } + + fn buildAllocaInner( + o: *Object, + builder: *llvm.Builder, + llvm_func: *llvm.Value, + di_scope_non_null: bool, + llvm_ty: *llvm.Type, + maybe_alignment: ?c_uint, + target: std.Target, + ) Allocator.Error!*llvm.Value { + const address_space = llvmAllocaAddressSpace(target); + + const alloca = blk: { + const prev_block = builder.getInsertBlock(); + const prev_debug_location = builder.getCurrentDebugLocation2(); + defer { + builder.positionBuilderAtEnd(prev_block); + if (di_scope_non_null) { + builder.setCurrentDebugLocation2(prev_debug_location); + } + } + + const entry_block = llvm_func.getFirstBasicBlock().?; + if (entry_block.getFirstInstruction()) |first_inst| { + builder.positionBuilder(entry_block, first_inst); + } else { + builder.positionBuilderAtEnd(entry_block); + } + builder.clearCurrentDebugLocation(); + + break :blk builder.buildAllocaInAddressSpace(llvm_ty, @intFromEnum(address_space), ""); + }; + + if (maybe_alignment) |alignment| { + alloca.setAlignment(alignment); + } + + // The pointer returned from this function should have the generic address space, + // if this isn't the case then cast it to the generic address space. 
+ if (address_space != .default) { + return builder.buildAddrSpaceCast(alloca, Builder.Type.ptr.toLlvm(&o.builder), ""); + } + + return alloca; + } }; pub const DeclGen = struct { @@ -4934,7 +4978,7 @@ pub const FuncGen = struct { const ret_ptr = if (!sret) null else blk: { const llvm_ret_ty = (try o.lowerType(return_type)).toLlvm(&o.builder); - const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); + const ret_ptr = try self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); try llvm_args.append(ret_ptr); break :blk ret_ptr; }; @@ -4971,7 +5015,7 @@ pub const FuncGen = struct { } else { const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = llvm_arg.typeOf(); - const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); + const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); store_inst.setAlignment(alignment); try llvm_args.append(arg_ptr); @@ -4984,7 +5028,7 @@ pub const FuncGen = struct { const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); + const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); if (isByRef(param_ty, mod)) { const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); @@ -5003,7 +5047,7 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = self.context.intType(abi_size * 8); + const int_llvm_ty = (try o.builder.intType(@intCast(abi_size * 8))).toLlvm(&o.builder); if (isByRef(param_ty, mod)) { const alignment = param_ty.abiAlignment(mod); @@ -5017,7 +5061,7 @@ pub const FuncGen = struct { param_ty.abiAlignment(mod), o.target_data.abiAlignmentOfType(int_llvm_ty), ); - const int_ptr = self.buildAlloca(int_llvm_ty, alignment); + const int_ptr = try self.buildAlloca(int_llvm_ty, alignment); const store_inst = self.builder.buildStore(llvm_arg, int_ptr); store_inst.setAlignment(alignment); const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, ""); @@ -5037,22 +5081,22 @@ pub const FuncGen = struct { .multiple_llvm_types => { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); - const llvm_types = it.llvm_types_buffer[0..it.types_len]; + const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); const arg_ptr = if (is_by_ref) llvm_arg else p: { - const p = self.buildAlloca(llvm_arg.typeOf(), null); + const p = try self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); store_inst.setAlignment(param_ty.abiAlignment(mod)); break :p p; }; - const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False); + const llvm_ty = (try o.builder.structType(.normal, llvm_types)).toLlvm(&o.builder); try llvm_args.ensureUnusedCapacity(it.types_len); for (llvm_types, 0..) 
|field_ty, i_usize| { const i = @as(c_uint, @intCast(i_usize)); const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); - const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); + const load_inst = self.builder.buildLoad(field_ty.toLlvm(&o.builder), field_ptr, ""); load_inst.setAlignment(target.ptrBitWidth() / 8); llvm_args.appendAssumeCapacity(load_inst); } @@ -5060,7 +5104,7 @@ pub const FuncGen = struct { .as_u16 => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const casted = self.builder.buildBitCast(llvm_arg, self.context.intType(16), ""); + const casted = self.builder.buildBitCast(llvm_arg, Builder.Type.i16.toLlvm(&o.builder), ""); try llvm_args.append(casted); }, .float_array => |count| { @@ -5068,7 +5112,7 @@ pub const FuncGen = struct { const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { - const p = self.buildAlloca(llvm_arg.typeOf(), null); + const p = try self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); store_inst.setAlignment(arg_ty.abiAlignment(mod)); llvm_arg = store_inst; @@ -5088,13 +5132,13 @@ pub const FuncGen = struct { const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { - const p = self.buildAlloca(llvm_arg.typeOf(), null); + const p = try self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); store_inst.setAlignment(arg_ty.abiAlignment(mod)); llvm_arg = store_inst; } - const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len); + const array_llvm_ty = (try o.builder.intType(@intCast(elem_size))).toLlvm(&o.builder).arrayType(arr_len); const alignment = arg_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); @@ -5198,7 +5242,7 @@ pub const FuncGen = struct { // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. const alignment = o.target_data.abiAlignmentOfType(abi_ret_ty); - const rp = self.buildAlloca(llvm_ret_ty, alignment); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(call, rp); store_inst.setAlignment(alignment); if (isByRef(return_type, mod)) { @@ -5214,7 +5258,7 @@ pub const FuncGen = struct { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. 
const alignment = return_type.abiAlignment(mod); - const rp = self.buildAlloca(llvm_ret_ty, alignment); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(call, rp); store_inst.setAlignment(alignment); return rp; @@ -5235,7 +5279,7 @@ pub const FuncGen = struct { }); const null_opt_addr_global = try o.getNullOptAddr(); const target = mod.getTarget(); - const llvm_usize = fg.context.intType(target.ptrBitWidth()); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); // example: // call fastcc void @test2.panic( // ptr @builtin.panic_messages.integer_overflow__anon_987, ; msg.ptr @@ -5310,7 +5354,7 @@ pub const FuncGen = struct { return null; } - const rp = self.buildAlloca(llvm_ret_ty, alignment); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(operand, rp); store_inst.setAlignment(alignment); const load_inst = self.builder.buildLoad(abi_ret_ty, rp, ""); @@ -5369,16 +5413,12 @@ pub const FuncGen = struct { const mod = o.module; const result_alignment = va_list_ty.abiAlignment(mod); - const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment); + const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_copy"; const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{ - self.context.pointerType(0), - self.context.pointerType(0), - }; - const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; const args: [2]*llvm.Value = .{ dest_list, src_list }; @@ -5400,9 +5440,8 @@ pub const FuncGen = struct { const llvm_fn_name = "llvm.va_end"; const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{self.context.pointerType(0)}; - const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.void, &.{.ptr}, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; const args: [1]*llvm.Value = .{list}; _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); @@ -5416,13 +5455,12 @@ pub const FuncGen = struct { const llvm_va_list_ty = (try o.lowerType(va_list_ty)).toLlvm(&o.builder); const result_alignment = va_list_ty.abiAlignment(mod); - const list = self.buildAlloca(llvm_va_list_ty, result_alignment); + const list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_start"; const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{self.context.pointerType(0)}; - const fn_type = llvm.functionType(self.context.voidType(), ¶m_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.void, &.{.ptr}, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; const args: [1]*llvm.Value = .{list}; _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); @@ -5495,7 +5533,7 @@ pub const FuncGen = struct { const 
opt_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); - const llvm_i2 = self.context.intType(2); + const llvm_i2 = (try o.builder.intType(2)).toLlvm(&o.builder); const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2, ""); const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2, ""); const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, llvm_i2.constInt(1, .False), ""); @@ -5529,7 +5567,7 @@ pub const FuncGen = struct { mixed_block, both_pl_block_end, }; - const llvm_i1 = self.context.intType(1); + const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); const llvm_i1_0 = llvm_i1.constInt(0, .False); const llvm_i1_1 = llvm_i1.constInt(1, .False); const incoming_values: [3]*llvm.Value = .{ @@ -5767,13 +5805,11 @@ pub const FuncGen = struct { fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const o = self.dg.object; - const mod = o.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); - const target = mod.getTarget(); - const llvm_usize = self.context.intType(target.ptrBitWidth()); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) self.builder.buildPtrToInt(cond, llvm_usize, "") else @@ -5874,48 +5910,46 @@ pub const FuncGen = struct { const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const dest_llvm_ty = try o.lowerType(dest_ty); const target = mod.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { if (operand_scalar_ty.isSignedInt(mod)) { - return self.builder.buildSIToFP(operand, dest_llvm_ty, ""); + return self.builder.buildSIToFP(operand, dest_llvm_ty.toLlvm(&o.builder), ""); } else { - return self.builder.buildUIToFP(operand, dest_llvm_ty, ""); + return self.builder.buildUIToFP(operand, dest_llvm_ty.toLlvm(&o.builder), ""); } } const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod))); const rt_int_bits = compilerRtIntBits(operand_bits); - const rt_int_ty = self.context.intType(rt_int_bits); + const rt_int_ty = try o.builder.intType(rt_int_bits); var extended = e: { if (operand_scalar_ty.isSignedInt(mod)) { - break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, ""); + break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty.toLlvm(&o.builder), ""); } else { - break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, ""); + break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty.toLlvm(&o.builder), ""); } }; const dest_bits = dest_scalar_ty.floatBits(target); const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits); const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits); const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un"; - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{ + const fn_name = try o.builder.fmt("__float{s}{s}i{s}f", .{ sign_prefix, compiler_rt_operand_abbrev, compiler_rt_dest_abbrev, - }) catch unreachable; + }); - var param_types = [1]*llvm.Type{rt_int_ty}; + var param_type = rt_int_ty; if (rt_int_bits == 128 and (target.os.tag == .windows and 
target.cpu.arch == .x86_64)) { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. - const v2i64 = self.context.intType(64).vectorType(2); - extended = self.builder.buildBitCast(extended, v2i64, ""); - param_types = [1]*llvm.Type{v2i64}; + param_type = try o.builder.vectorType(.normal, 2, .i64); + extended = self.builder.buildBitCast(extended, param_type.toLlvm(&o.builder), ""); } - const libc_fn = try self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + const libc_fn = try self.getLibcFunction(fn_name, &.{param_type}, dest_llvm_ty); const params = [1]*llvm.Value{extended}; return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, ""); @@ -5935,23 +5969,23 @@ const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const dest_llvm_ty = try o.lowerType(dest_ty); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag if (dest_scalar_ty.isSignedInt(mod)) { - return self.builder.buildFPToSI(operand, dest_llvm_ty, ""); + return self.builder.buildFPToSI(operand, dest_llvm_ty.toLlvm(&o.builder), ""); } else { - return self.builder.buildFPToUI(operand, dest_llvm_ty, ""); + return self.builder.buildFPToUI(operand, dest_llvm_ty.toLlvm(&o.builder), ""); } } - const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod)))); - const ret_ty = self.context.intType(rt_int_bits); + const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); + const ret_ty = try o.builder.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
- break :b self.context.intType(64).vectorType(2); + break :b try o.builder.vectorType(.normal, 2, .i64); } else ret_ty; const operand_bits = operand_scalar_ty.floatBits(target); @@ -5960,22 +5994,20 @@ const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits); const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns"; - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{ + const fn_name = try o.builder.fmt("__fix{s}{s}f{s}i", .{ sign_prefix, compiler_rt_operand_abbrev, compiler_rt_dest_abbrev, - }) catch unreachable; + }); - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const libc_fn = try self.getLibcFunction(fn_name, &param_types, libc_ret_ty); + const operand_llvm_ty = try o.lowerType(operand_ty); + const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, libc_ret_ty); const params = [1]*llvm.Value{operand}; var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, ""); - if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty, ""); - if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty, ""); + if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty.toLlvm(&o.builder), ""); + if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty.toLlvm(&o.builder), ""); return result; } @@ -5989,11 +6021,10 @@ } } - fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { + fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) Allocator.Error!*llvm.Value { const o = fg.dg.object; const mod = o.module; - const target = mod.getTarget(); - const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); + const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); @@ -6080,7 +6111,7 @@ const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { - const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; if (isByRef(elem_ty, mod)) { const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); if (canElideLoad(self, body_tail)) @@ -6128,7 +6159,7 @@ // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { // If this is a single-item pointer to an array, we need another index in the GEP. - const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); } else ptr: { const indices: [1]*llvm.Value = .{rhs}; @@ -6162,7 +6193,7 @@ const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); if (ptr_ty.isSinglePointer(mod)) { // If this is a single-item pointer to an array, we need another index in the GEP.
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); } else { const indices: [1]*llvm.Value = .{rhs}; @@ -6216,12 +6247,12 @@ pub const FuncGen = struct { const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); } @@ -6238,12 +6269,12 @@ pub const FuncGen = struct { const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); } @@ -6302,7 +6333,6 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); - const target = o.module.getTarget(); const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); @@ -6310,7 +6340,7 @@ pub const FuncGen = struct { if (field_offset == 0) { return field_ptr; } - const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); + const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, ""); const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), ""); @@ -6493,7 +6523,7 @@ pub const FuncGen = struct { _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { const alignment = operand_ty.abiAlignment(mod); - const alloca = self.buildAlloca(operand.typeOf(), alignment); + const alloca = try self.buildAlloca(operand.typeOf(), alignment); const store_inst = self.builder.buildStore(operand, alloca); store_inst.setAlignment(alignment); _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); @@ -6532,7 +6562,7 @@ pub const FuncGen = 
struct { // The exact number of return / parameter values depends on which output values // are passed by reference as indirect outputs (determined below). const max_return_count = outputs.len; - const llvm_ret_types = try arena.alloc(*llvm.Type, max_return_count); + const llvm_ret_types = try arena.alloc(Builder.Type, max_return_count); const llvm_ret_indirect = try arena.alloc(bool, max_return_count); const max_param_count = inputs.len + outputs.len; @@ -6571,7 +6601,7 @@ pub const FuncGen = struct { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); - const elem_llvm_ty = (try o.lowerPtrElemTy(output_ty.childType(mod))).toLlvm(&o.builder); + const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(mod)); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. "=*m") @@ -6579,7 +6609,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = output_inst; llvm_param_types[llvm_param_i] = output_inst.typeOf(); - llvm_param_attrs[llvm_param_i] = elem_llvm_ty; + llvm_param_attrs[llvm_param_i] = elem_llvm_ty.toLlvm(&o.builder); llvm_param_i += 1; } else { // Pass the result directly (e.g. "=r") @@ -6588,7 +6618,7 @@ pub const FuncGen = struct { } } else { const ret_ty = self.typeOfIndex(inst); - llvm_ret_types[llvm_ret_i] = (try o.lowerType(ret_ty)).toLlvm(&o.builder); + llvm_ret_types[llvm_ret_i] = try o.lowerType(ret_ty); llvm_ret_i += 1; } @@ -6623,9 +6653,9 @@ pub const FuncGen = struct { const arg_llvm_value = try self.resolveInst(input); const arg_ty = self.typeOf(input); - var llvm_elem_ty: ?*llvm.Type = null; + var llvm_elem_ty: Builder.Type = .none; if (isByRef(arg_ty, mod)) { - llvm_elem_ty = (try o.lowerPtrElemTy(arg_ty)).toLlvm(&o.builder); + llvm_elem_ty = try o.lowerPtrElemTy(arg_ty); if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); @@ -6643,7 +6673,7 @@ pub const FuncGen = struct { llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); } else { const alignment = arg_ty.abiAlignment(mod); - const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment); + const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOf(), alignment); const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr); store_inst.setAlignment(alignment); llvm_param_values[llvm_param_i] = arg_ptr; @@ -6671,8 +6701,10 @@ pub const FuncGen = struct { // In the case of indirect inputs, LLVM requires the callsite to have // an elementtype() attribute. 
if (constraint[0] == '*') { - llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - (try o.lowerPtrElemTy(arg_ty.childType(mod))).toLlvm(&o.builder); + llvm_param_attrs[llvm_param_i] = (if (llvm_elem_ty != .none) + llvm_elem_ty + else + try o.lowerPtrElemTy(arg_ty.childType(mod))).toLlvm(&o.builder); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6792,17 +6824,13 @@ pub const FuncGen = struct { } const ret_llvm_ty = switch (return_count) { - 0 => self.context.voidType(), + 0 => .void, 1 => llvm_ret_types[0], - else => self.context.structType( - llvm_ret_types.ptr, - @as(c_uint, @intCast(return_count)), - .False, - ), + else => try o.builder.structType(.normal, llvm_ret_types), }; const llvm_fn_ty = llvm.functionType( - ret_llvm_ty, + ret_llvm_ty.toLlvm(&o.builder), llvm_param_types.ptr, @as(c_uint, @intCast(param_count)), .False, @@ -6891,7 +6919,7 @@ pub const FuncGen = struct { self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - const llvm_i8 = self.context.intType(8); + const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), ""); } @@ -6921,7 +6949,7 @@ pub const FuncGen = struct { const zero = err_set_ty.constNull(); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const llvm_i1 = self.context.intType(1); + const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 .NE => return llvm_i1.constInt(0, .False), // 0 != 0 @@ -6979,7 +7007,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - const non_null_bit = self.context.intType(8).constInt(1, .False); + const non_null_bit = Builder.Type.i8.toLlvm(&o.builder).constInt(1, .False); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
_ = self.builder.buildStore(non_null_bit, operand); @@ -7165,7 +7193,7 @@ pub const FuncGen = struct { const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.typeOf(ty_op.operand); - const non_null_bit = self.context.intType(8).constInt(1, .False); + const non_null_bit = Builder.Type.i8.toLlvm(&o.builder).constInt(1, .False); comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); @@ -7175,7 +7203,7 @@ pub const FuncGen = struct { } const llvm_optional_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); if (isByRef(optional_ty, mod)) { - const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); + const optional_ptr = try self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); @@ -7203,7 +7231,7 @@ pub const FuncGen = struct { const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(ok_err_code, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); @@ -7232,7 +7260,7 @@ pub const FuncGen = struct { const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(operand, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); @@ -7252,8 +7280,8 @@ pub const FuncGen = struct { fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - const llvm_u32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size", &.{llvm_u32}); + const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); + const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.size", &.{.i32}); const args: [1]*llvm.Value = .{llvm_u32.constInt(index, .False)}; return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); } @@ -7262,8 +7290,8 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; const operand = try self.resolveInst(pl_op.operand); - const llvm_u32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow", &.{llvm_u32}); + const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); + const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32}); const args: [2]*llvm.Value = .{ llvm_u32.constInt(index, .False), operand, @@ -7371,8 +7399,7 @@ pub const FuncGen = struct { true => 
signed_intrinsic, false => unsigned_intrinsic, }; - const llvm_inst_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); - const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); + const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{try o.lowerType(inst_ty)}); const result_struct = fg.builder.buildCall( llvm_fn.globalGetValueType(), llvm_fn, @@ -7658,7 +7685,7 @@ pub const FuncGen = struct { switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset }; + const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), offset }; return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); }, .C, .Many => { @@ -7687,7 +7714,7 @@ pub const FuncGen = struct { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. const indices: [2]*llvm.Value = .{ - self.context.intType(32).constNull(), negative_offset, + Builder.Type.i32.toLlvm(&o.builder).constNull(), negative_offset, }; return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); }, @@ -7723,10 +7750,9 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = (try o.lowerType(lhs_ty)).toLlvm(&o.builder); const llvm_dest_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); - const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); + const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{try o.lowerType(lhs_ty)}); const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); const result = self.builder.buildExtractValue(result_struct, 0, ""); @@ -7737,7 +7763,7 @@ pub const FuncGen = struct { if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); + const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); const store_inst = self.builder.buildStore(result, field_ptr); @@ -7764,7 +7790,7 @@ pub const FuncGen = struct { vector_len: usize, ) !*llvm.Value { const args_len = @as(c_uint, @intCast(args_vectors.len)); - const llvm_i32 = self.context.intType(32); + const llvm_i32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); assert(args_len <= 3); var i: usize = 0; @@ -7784,23 +7810,21 @@ pub const FuncGen = struct { fn getLibcFunction( self: *FuncGen, - fn_name: [:0]const u8, - param_types: []const *llvm.Type, - return_type: *llvm.Type, + fn_name: Builder.String, + param_types: []const Builder.Type, + return_type: Builder.Type, ) Allocator.Error!*llvm.Value { const o = self.dg.object; - return o.llvm_module.getNamedFunction(fn_name.ptr) orelse b: { - const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len); + const slice = fn_name.toSlice(&o.builder).?; + return o.llvm_module.getNamedFunction(slice) orelse b: { + const alias = o.llvm_module.getNamedGlobalAlias(slice.ptr, slice.len); break :b if (alias) |a| a.getAliasee() else null; } orelse b: { - const name = try o.builder.string(fn_name); - - const params_len = @as(c_uint, @intCast(param_types.len)); - const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False); - const f = 
o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type); + const fn_type = try o.builder.fnType(return_type, param_types, .normal); + const f = o.llvm_module.addFunction(slice, fn_type.toLlvm(&o.builder)); var global = Builder.Global{ - .type = try o.builder.fnType(.void, &.{}, .normal), + .type = fn_type, .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; var function = Builder.Function{ @@ -7808,9 +7832,8 @@ }; try o.builder.llvm_globals.append(self.gpa, f); - _ = try o.builder.addGlobal(name, global); + _ = try o.builder.addGlobal(fn_name, global); try o.builder.functions.append(self.gpa, function); - break :b f; }; } @@ -7827,7 +7850,7 @@ const mod = o.module; const target = o.module.getTarget(); const scalar_ty = ty.scalarType(mod); - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); + const scalar_llvm_ty = try o.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { const llvm_predicate: llvm.RealPredicate = switch (pred) { @@ -7843,7 +7866,6 @@ const float_bits = scalar_ty.floatBits(target); const compiler_rt_float_abbrev = compilerRtFloatAbbrev(float_bits); - var fn_name_buf: [64]u8 = undefined; const fn_base_name = switch (pred) { .neq => "ne", .eq => "eq", @@ -7852,15 +7874,15 @@ .gt => "gt", .gte => "ge", }; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f2", .{ - fn_base_name, compiler_rt_float_abbrev, - }) catch unreachable; + const fn_name = try o.builder.fmt("__{s}{s}f2", .{ fn_base_name, compiler_rt_float_abbrev }); - const param_types = [2]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty }; - const llvm_i32 = self.context.intType(32); - const libc_fn = try self.getLibcFunction(fn_name, param_types[0..], llvm_i32); + const libc_fn = try self.getLibcFunction( + fn_name, + ([1]Builder.Type{scalar_llvm_ty} ** 2)[0..], + .i32, + ); - const zero = llvm_i32.constInt(0, .False); + const zero = Builder.Type.i32.toLlvm(&o.builder).constInt(0, .False); const int_pred: llvm.IntPredicate = switch (pred) { .eq => .EQ, .neq => .NE, @@ -7872,7 +7894,7 @@ if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); - const vector_result_ty = llvm_i32.vectorType(vec_len); + const vector_result_ty = (try o.builder.vectorType(.normal, vec_len, .i32)).toLlvm(&o.builder); var result = vector_result_ty.getUndef(); result = try self.buildElementwiseCall(libc_fn, &params, result, vec_len); @@ -7913,7 +7935,7 @@ const FloatOpStrat = union(enum) { intrinsic: []const u8, - libc: [:0]const u8, + libc: Builder.String, }; /// Creates a floating point operation (add, sub, fma, sqrt, exp, etc.)
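
(Illustrative aside, not part of the patch: both the float-compare path above and the float-op path below fall back to softfloat helpers that are resolved purely by name, assembled from the operation plus a size abbreviation via compilerRtFloatAbbrev/libcFloatPrefix/libcFloatSuffix and then declared on demand by getLibcFunction. A minimal Zig sketch of that naming scheme is below; floatAbbrevSketch is a hypothetical stand-in for the abbreviation helper, not the compiler's own function.)

const std = @import("std");

// Simplified stand-in for the float-size abbreviation used in compiler-rt helper names.
fn floatAbbrevSketch(bits: u16) []const u8 {
    return switch (bits) {
        16 => "h",
        32 => "s",
        64 => "d",
        80 => "x",
        128 => "t",
        else => unreachable,
    };
}

test "softfloat helper naming sketch" {
    var buf: [32]u8 = undefined;
    // An f128 add falls back to "__addtf3" (the "__{s}{s}f3" pattern in the hunks above).
    try std.testing.expectEqualStrings(
        "__addtf3",
        try std.fmt.bufPrint(&buf, "__{s}{s}f3", .{ "add", floatAbbrevSketch(128) }),
    );
    // An f128 ordered less-than compare falls back to "__lttf2" (the "__{s}{s}f2" pattern).
    try std.testing.expectEqualStrings(
        "__lttf2",
        try std.fmt.bufPrint(&buf, "__{s}{s}f2", .{ "lt", floatAbbrevSketch(128) }),
    );
}

(With f128 operands an add therefore resolves to __addtf3 and a less-than compare to __lttf2; getLibcFunction looks each name up in the module and declares it lazily the first time it is needed.)
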
@@ -7930,11 +7952,10 @@ pub const FuncGen = struct { const mod = o.module; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); - const llvm_ty = (try o.lowerType(ty)).toLlvm(&o.builder); - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); + const llvm_ty = try o.lowerType(ty); + const scalar_llvm_ty = try o.lowerType(scalar_ty); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); - var fn_name_buf: [64]u8 = undefined; const strat: FloatOpStrat = if (intrinsics_allowed) switch (op) { // Some operations are dedicated LLVM instructions, not available as intrinsics .neg => return self.builder.buildFNeg(params[0], ""), @@ -7952,7 +7973,7 @@ .neg => { // In this case we can generate a softfloat negation by XORing the // bits with a constant. - const int_llvm_ty = self.context.intType(float_bits); + const int_llvm_ty = (try o.builder.intType(@intCast(float_bits))).toLlvm(&o.builder); const one = int_llvm_ty.constInt(1, .False); const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); @@ -7965,13 +7986,11 @@ const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, ""); break :blk self.builder.buildXor(bitcasted_operand, sign_mask, ""); }; - return self.builder.buildBitCast(result, llvm_ty, ""); - }, - .add, .sub, .div, .mul => FloatOpStrat{ - .libc = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f3", .{ - @tagName(op), compilerRtFloatAbbrev(float_bits), - }) catch unreachable, + return self.builder.buildBitCast(result, llvm_ty.toLlvm(&o.builder), ""); }, + .add, .sub, .div, .mul => .{ .libc = try o.builder.fmt("__{s}{s}f3", .{ + @tagName(op), compilerRtFloatAbbrev(float_bits), + }) }, .ceil, .cos, .exp, @@ -7990,21 +8009,22 @@ .sqrt, .tan, .trunc, - => FloatOpStrat{ - .libc = std.fmt.bufPrintZ(&fn_name_buf, "{s}{s}{s}", .{ - libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits), - }) catch unreachable, - }, + => .{ .libc = try o.builder.fmt("{s}{s}{s}", .{ + libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits), + }) }, }; }; const llvm_fn: *llvm.Value = switch (strat) { - .intrinsic => |fn_name| self.getIntrinsic(fn_name, &.{llvm_ty}), + .intrinsic => |fn_name| try self.getIntrinsic(fn_name, &.{llvm_ty}), .libc => |fn_name| b: { - const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty }; - const libc_fn = try self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); + const libc_fn = try self.getLibcFunction( + fn_name, + ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len], + scalar_llvm_ty, + ); if (ty.zigTypeTag(mod) == .Vector) { - const result = llvm_ty.getUndef(); + const result = llvm_ty.toLlvm(&o.builder).getUndef(); return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod)); } @@ -8061,7 +8081,7 @@ if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); + const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); const store_inst = self.builder.buildStore(result, field_ptr); @@ -8266,17 +8286,15 @@ const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); return self.builder.buildFPTrunc(operand, dest_llvm_ty,
""); } else { - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const operand_llvm_ty = try o.lowerType(operand_ty); + const dest_llvm_ty = try o.lowerType(dest_ty); - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{ + const fn_name = try o.builder.fmt("__trunc{s}f{s}f2", .{ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), - }) catch unreachable; + }); const params = [1]*llvm.Value{operand}; - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = try self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); } @@ -8297,17 +8315,15 @@ const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } else { - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const operand_llvm_ty = try o.lowerType(operand_ty); + const dest_llvm_ty = try o.lowerType(dest_ty); - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{ + const fn_name = try o.builder.fmt("__extend{s}f{s}f2", .{ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), - }) catch unreachable; + }); const params = [1]*llvm.Value{operand}; - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = try self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); } @@ -8358,7 +8374,7 @@ if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const array_ptr = self.buildAlloca(llvm_dest_ty, null); + const array_ptr = try self.buildAlloca(llvm_dest_ty, null); const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { const llvm_store = self.builder.buildStore(operand, array_ptr); @@ -8367,7 +8383,7 @@ // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement.
const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; @@ -8401,7 +8417,7 @@ pub const FuncGen = struct { const array_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(mod); var vector = llvm_vector_ty.getUndef(); @@ -8427,7 +8443,7 @@ pub const FuncGen = struct { if (result_is_ref) { const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); - const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); + const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); return result_ptr; @@ -8438,7 +8454,7 @@ pub const FuncGen = struct { // but LLVM won't let us bitcast struct values. // Therefore, we store operand to alloca, then load for result. const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); - const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); + const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, ""); @@ -8489,7 +8505,7 @@ pub const FuncGen = struct { _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { const alignment = inst_ty.abiAlignment(mod); - const alloca = self.buildAlloca(arg_val.typeOf(), alignment); + const alloca = try self.buildAlloca(arg_val.typeOf(), alignment); const store_inst = self.builder.buildStore(arg_val, alloca); store_inst.setAlignment(alignment); _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); @@ -8527,11 +8543,11 @@ pub const FuncGen = struct { /// Use this instead of builder.buildAlloca, because this function makes sure to /// put the alloca instruction at the top of the function! - fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) *llvm.Value { + fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) Allocator.Error!*llvm.Value { const o = self.dg.object; const mod = o.module; const target = mod.getTarget(); - return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); + return o.buildAllocaInner(self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { @@ -8547,7 +8563,7 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. 
- const u8_llvm_ty = self.context.intType(8); + const u8_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const fill_byte = if (safety) u8_llvm_ty.constInt(0xaa, .False) else @@ -8558,7 +8574,7 @@ const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); if (safety and mod.comp.bin_file.options.valgrind) { - self.valgrindMarkUndef(dest_ptr, len); + try self.valgrindMarkUndef(dest_ptr, len); } return null; } @@ -8609,7 +8625,7 @@ fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { _ = inst; - const llvm_fn = self.getIntrinsic("llvm.trap", &.{}); + const llvm_fn = try self.getIntrinsic("llvm.trap", &.{}); _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, ""); _ = self.builder.buildUnreachable(); return null; @@ -8617,7 +8633,7 @@ fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { _ = inst; - const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{}); + const llvm_fn = try self.getIntrinsic("llvm.debugtrap", &.{}); _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .C, .Auto, ""); return null; } @@ -8633,8 +8649,8 @@ return llvm_usize.constNull(); } - const llvm_i32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{}); + const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); + const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{}); const params = [_]*llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); @@ -8643,16 +8659,13 @@ fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { _ = inst; const o = self.dg.object; - const llvm_i32 = self.context.intType(32); const llvm_fn_name = "llvm.frameaddress.p0"; const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const llvm_p0i8 = self.context.pointerType(0); - const param_types = [_]*llvm.Type{llvm_i32}; - const fn_type = llvm.functionType(llvm_p0i8, &param_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.ptr, &.{.i32}, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; - const params = [_]*llvm.Value{llvm_i32.constNull()}; + const params = [_]*llvm.Value{Builder.Type.i32.toLlvm(&o.builder).constNull()}; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); @@ -8841,9 +8854,8 @@ const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const target = mod.getTarget(); const dest_ptr_align = ptr_ty.ptrAlignment(mod); - const u8_llvm_ty = self.context.intType(8); + const u8_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); @@ -8864,7 +8876,7 @@ u8_llvm_ty.constInt(0xaa, .False) else u8_llvm_ty.getUndef(); - const len =
self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); } else { @@ -8872,7 +8884,7 @@ } if (safety and mod.comp.bin_file.options.valgrind) { - self.valgrindMarkUndef(dest_ptr, len); + try self.valgrindMarkUndef(dest_ptr, len); } return null; } @@ -8886,7 +8898,7 @@ .ty = Type.u8, .val = byte_val, }); - const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); @@ -8903,7 +8915,7 @@ if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. const fill_byte = try self.bitCast(value, elem_ty, Type.u8); - const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); @@ -8934,7 +8946,7 @@ const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody"); const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); - const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); + const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const len = switch (ptr_ty.ptrSize(mod)) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), @@ -9008,7 +9020,7 @@ const src_slice = try self.resolveInst(bin_op.rhs); const src_ptr_ty = self.typeOf(bin_op.rhs); const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); - const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); @@ -9123,9 +9135,8 @@ const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); + const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{try o.lowerType(operand_ty)}); const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); @@ -9151,8 +9162,7 @@ const operand = try self.resolveInst(ty_op.operand); const params = [_]*llvm.Value{operand}; - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{try o.lowerType(operand_ty)}); const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); @@ -9178,15 +9188,16 @@ assert(bits % 8 == 0); var operand = try
self.resolveInst(ty_op.operand); - var operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); + var operand_llvm_ty = try o.lowerType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap - const scalar_llvm_ty = self.context.intType(bits + 8); + const scalar_ty = try o.builder.intType(@intCast(bits + 8)); + const scalar_llvm_ty = scalar_ty.toLlvm(&o.builder); if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(mod); - operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); + operand_llvm_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); @@ -9196,18 +9207,18 @@ } const shift_vec = llvm.constVector(shifts.ptr, vec_len); - const extended = self.builder.buildZExt(operand, operand_llvm_ty, ""); + const extended = self.builder.buildZExt(operand, operand_llvm_ty.toLlvm(&o.builder), ""); operand = self.builder.buildShl(extended, shift_vec, ""); } else { const extended = self.builder.buildZExt(operand, scalar_llvm_ty, ""); operand = self.builder.buildShl(extended, scalar_llvm_ty.constInt(8, .False), ""); - operand_llvm_ty = scalar_llvm_ty; + operand_llvm_ty = scalar_ty; } bits = bits + 8; } const params = [_]*llvm.Value{operand}; - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); @@ -9252,7 +9263,7 @@ self.builder.positionBuilderAtEnd(end_block); - const llvm_type = self.context.intType(1); + const llvm_type = Builder.Type.i1.toLlvm(&o.builder); const incoming_values: [2]*llvm.Value = .{ llvm_type.constInt(1, .False), llvm_type.constInt(0, .False), }; @@ -9281,25 +9292,30 @@ // TODO: detect when the type changes and re-emit this function.
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.toLlvm(&o.builder); errdefer assert(o.named_enum_map.remove(enum_type.decl)); - var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}); - - const param_types = [_]*llvm.Type{(try o.lowerType(enum_type.tag_ty.toType())).toLlvm(&o.builder)}; + const llvm_fn_name = try o.builder.fmt("__zig_is_named_enum_value_{}", .{ + fqn.fmt(&mod.intern_pool), + }); - const llvm_ret_ty = (try o.lowerType(Type.bool)).toLlvm(&o.builder); - const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); - const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.i1, &.{try o.lowerType( + enum_type.tag_ty.toType(), + )}, .normal); + const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); o.addCommonFnAttributes(fn_val); - gop.value_ptr.* = fn_val; + + var global = Builder.Global{ + .linkage = .internal, + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; const prev_block = self.builder.getInsertBlock(); const prev_debug_location = self.builder.getCurrentDebugLocation2(); @@ -9330,10 +9346,15 @@ pub const FuncGen = struct { switch_instr.addCase(this_tag_int_value, named_block); } self.builder.positionBuilderAtEnd(named_block); - _ = self.builder.buildRet(self.context.intType(1).constInt(1, .False)); + _ = self.builder.buildRet(Builder.Type.i1.toLlvm(&o.builder).constInt(1, .False)); self.builder.positionBuilderAtEnd(unnamed_block); - _ = self.builder.buildRet(self.context.intType(1).constInt(0, .False)); + _ = self.builder.buildRet(Builder.Type.i1.toLlvm(&o.builder).constInt(0, .False)); + + try o.builder.llvm_globals.append(self.gpa, fn_val); + _ = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); + gop.value_ptr.* = global.kind.function; return fn_val; } @@ -9361,20 +9382,22 @@ pub const FuncGen = struct { const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); const slice_ty = Type.slice_const_u8_sentinel_0; - const llvm_ret_ty = (try o.lowerType(slice_ty)).toLlvm(&o.builder); + const ret_ty = try o.lowerType(slice_ty); + const llvm_ret_ty = ret_ty.toLlvm(&o.builder); const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const slice_alignment = slice_ty.abiAlignment(mod); - const param_types = [_]*llvm.Type{(try o.lowerType(enum_type.tag_ty.toType())).toLlvm(&o.builder)}; - - const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); - const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type); + const fn_type = try o.builder.fnType(ret_ty, &.{ + try o.lowerType(enum_type.tag_ty.toType()), + }, .normal); + const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); o.addCommonFnAttributes(fn_val); var 
global = Builder.Global{ - .type = try o.builder.fnType(.void, &.{}, .normal), + .linkage = .internal, + .type = fn_type, .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; var function = Builder.Function{ @@ -9457,15 +9480,24 @@ pub const FuncGen = struct { // Function signature: fn (anyerror) bool - const ret_llvm_ty = (try o.lowerType(Type.bool)).toLlvm(&o.builder); - const anyerror_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - const param_types = [_]*llvm.Type{anyerror_llvm_ty}; + const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal); + const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type.toLlvm(&o.builder)); + + var global = Builder.Global{ + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; - const fn_type = llvm.functionType(ret_llvm_ty, ¶m_types, param_types.len, .False); - const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type); llvm_fn.setLinkage(.Internal); llvm_fn.setFunctionCallConv(.Fast); o.addCommonFnAttributes(llvm_fn); + + try o.builder.llvm_globals.append(self.gpa, llvm_fn); + _ = try o.builder.addGlobal(try o.builder.string(lt_errors_fn_name), global); + try o.builder.functions.append(self.gpa, function); return llvm_fn; } @@ -9523,7 +9555,7 @@ pub const FuncGen = struct { const values = try self.gpa.alloc(*llvm.Value, mask_len); defer self.gpa.free(values); - const llvm_i32 = self.context.intType(32); + const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); for (values, 0..) |*val, i| { const elem = try mask.elemValue(mod, i); @@ -9565,9 +9597,9 @@ pub const FuncGen = struct { const llvm_result_ty = accum_init.typeOf(); // Allocate and initialize our mutable variables - const i_ptr = self.buildAlloca(llvm_usize_ty, null); + const i_ptr = try self.buildAlloca(llvm_usize_ty, null); _ = self.builder.buildStore(llvm_usize_ty.constInt(0, .False), i_ptr); - const accum_ptr = self.buildAlloca(llvm_result_ty, null); + const accum_ptr = try self.buildAlloca(llvm_result_ty, null); _ = self.builder.buildStore(accum_init, accum_ptr); // Setup the loop @@ -9656,27 +9688,25 @@ pub const FuncGen = struct { // Reduction could not be performed with intrinsics. // Use a manual loop over a softfloat call instead. 
- var fn_name_buf: [64]u8 = undefined; const float_bits = scalar_ty.floatBits(target); const fn_name = switch (reduce.operation) { - .Min => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{ + .Min => try o.builder.fmt("{s}fmin{s}", .{ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), - }) catch unreachable, - .Max => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{ + }), + .Max => try o.builder.fmt("{s}fmax{s}", .{ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), - }) catch unreachable, - .Add => std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{ + }), + .Add => try o.builder.fmt("__add{s}f3", .{ compilerRtFloatAbbrev(float_bits), - }) catch unreachable, - .Mul => std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{ + }), + .Mul => try o.builder.fmt("__mul{s}f3", .{ compilerRtFloatAbbrev(float_bits), - }) catch unreachable, + }), else => unreachable, }; - const param_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); - const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty }; - const libc_fn = try self.getLibcFunction(fn_name, ¶m_types, param_llvm_ty); + const param_llvm_ty = try o.lowerType(scalar_ty); + const libc_fn = try self.getLibcFunction(fn_name, &(.{param_llvm_ty} ** 2), param_llvm_ty); const init_value = try o.lowerValue(.{ .ty = scalar_ty, .val = try mod.floatValue(scalar_ty, switch (reduce.operation) { @@ -9701,7 +9731,7 @@ pub const FuncGen = struct { switch (result_ty.zigTypeTag(mod)) { .Vector => { - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); var vector = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { @@ -9716,7 +9746,7 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); + const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); @@ -9727,7 +9757,7 @@ pub const FuncGen = struct { const non_int_val = try self.resolveInst(elem); const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = self.context.intType(ty_bit_size); + const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -9745,10 +9775,10 @@ pub const FuncGen = struct { } if (isByRef(result_ty, mod)) { - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) 
|elem, i| { @@ -9786,7 +9816,7 @@ pub const FuncGen = struct { assert(isByRef(result_ty, mod)); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); const elem_ptr_ty = try mod.ptrType(.{ @@ -9834,11 +9864,11 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); + const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = self.context.intType(ty_bit_size); + const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -9866,51 +9896,47 @@ pub const FuncGen = struct { // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set // the fields appropriately. - const result_ptr = self.buildAlloca(union_llvm_ty, layout.abi_align); + const result_ptr = try self.buildAlloca(union_llvm_ty, layout.abi_align); const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; - const field_llvm_ty = (try o.lowerType(field.ty)).toLlvm(&o.builder); + const field_llvm_ty = try o.lowerType(field.ty); const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); - const llvm_union_ty = t: { - const payload = p: { + const llvm_union_ty = (t: { + const payload_ty = p: { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p self.context.intType(8).arrayType(padding_len); + const padding_len = layout.payload_size; + break :p try o.builder.arrayType(padding_len, .i8); } if (field_size == layout.payload_size) { break :p field_llvm_ty; } - const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); - const fields: [2]*llvm.Type = .{ - field_llvm_ty, self.context.intType(8).arrayType(padding_len), - }; - break :p self.context.structType(&fields, fields.len, .True); + const padding_len = layout.payload_size - field_size; + break :p try o.builder.structType(.@"packed", &.{ + field_llvm_ty, try o.builder.arrayType(padding_len, .i8), + }); }; - if (layout.tag_size == 0) { - const fields: [1]*llvm.Type = .{payload}; - break :t self.context.structType(&fields, fields.len, .False); - } - const tag_llvm_ty = (try o.lowerType(union_obj.tag_ty)).toLlvm(&o.builder); - var fields: [3]*llvm.Type = undefined; - var fields_len: c_uint = 2; + if (layout.tag_size == 0) break :t try o.builder.structType(.normal, &.{payload_ty}); + const tag_ty = try o.lowerType(union_obj.tag_ty); + var fields: [3]Builder.Type = undefined; + var fields_len: usize = 2; if (layout.tag_align >= layout.payload_align) { - fields = .{ tag_llvm_ty, payload, undefined }; + fields = .{ tag_ty, payload_ty, undefined }; } else { - fields = .{ payload, tag_llvm_ty, undefined }; + fields = .{ payload_ty, tag_ty, undefined }; } if 
(layout.padding != 0) { - fields[2] = self.context.intType(8).arrayType(layout.padding); - fields_len = 3; + fields[fields_len] = try o.builder.arrayType(layout.padding, .i8); + fields_len += 1; } - break :t self.context.structType(&fields, fields_len, .False); - }; + break :t try o.builder.structType(.normal, fields[0..fields_len]); + }).toLlvm(&o.builder); // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. - const index_type = self.context.intType(32); + const index_type = Builder.Type.i32.toLlvm(&o.builder); const field_ptr_ty = try mod.ptrType(.{ .child = field.ty.toIntern(), @@ -9996,22 +10022,16 @@ pub const FuncGen = struct { .data => {}, } - const llvm_ptr_u8 = self.context.pointerType(0); - const llvm_u32 = self.context.intType(32); - const llvm_fn_name = "llvm.prefetch.p0"; const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { // declare void @llvm.prefetch(i8*, i32, i32, i32) - const llvm_void = self.context.voidType(); - const param_types = [_]*llvm.Type{ - llvm_ptr_u8, llvm_u32, llvm_u32, llvm_u32, - }; - const fn_type = llvm.functionType(llvm_void, ¶m_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; const ptr = try self.resolveInst(prefetch.ptr); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const params = [_]*llvm.Value{ ptr, llvm_u32.constInt(@intFromEnum(prefetch.rw), .False), @@ -10033,7 +10053,7 @@ pub const FuncGen = struct { } fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value { - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); const llvm_fn_name = switch (dimension) { 0 => basename ++ ".x", @@ -10043,7 +10063,7 @@ pub const FuncGen = struct { }; const args: [0]*llvm.Value = .{}; - const llvm_fn = self.getIntrinsic(llvm_fn_name, &.{}); + const llvm_fn = try self.getIntrinsic(llvm_fn_name, &.{}); return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); } @@ -10064,14 +10084,14 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); if (dimension >= 3) { return llvm_u32.constInt(1, .False); } // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 - const llvm_fn = self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); + const llvm_fn = try self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); const args: [0]*llvm.Value = .{}; const dispatch_ptr = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); dispatch_ptr.setAlignment(4); @@ -10080,7 +10100,7 @@ pub const FuncGen = struct { // Just treat the dispatch pointer as an array of u16 to keep things simple. 
const offset = 2 + dimension; const index = [_]*llvm.Value{llvm_u32.constInt(offset, .False)}; - const llvm_u16 = self.context.intType(16); + const llvm_u16 = Builder.Type.i16.toLlvm(&o.builder); const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, ""); const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, ""); workgroup_size.setAlignment(2); @@ -10126,7 +10146,7 @@ pub const FuncGen = struct { opt_handle: *llvm.Value, is_by_ref: bool, ) *llvm.Value { - const non_null_llvm_ty = self.context.intType(8); + const non_null_llvm_ty = Builder.Type.i8.toLlvm(&self.dg.object.builder); const field = b: { if (is_by_ref) { const field_ptr = self.builder.buildStructGEP(opt_llvm_ty, opt_handle, 1, ""); @@ -10180,12 +10200,12 @@ pub const FuncGen = struct { ) !?*llvm.Value { const o = self.dg.object; const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); - const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); + const non_null_field = self.builder.buildZExt(non_null_bit, Builder.Type.i8.toLlvm(&o.builder), ""); const mod = o.module; if (isByRef(optional_ty, mod)) { const payload_alignment = optional_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment); + const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment); { const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 0, ""); @@ -10233,7 +10253,7 @@ pub const FuncGen = struct { // Offset our operand pointer by the correct number of bytes. const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; - const byte_llvm_ty = self.context.intType(8); + const byte_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const llvm_index = llvm_usize.constInt(byte_offset, .False); const indices: [1]*llvm.Value = .{llvm_index}; @@ -10249,7 +10269,7 @@ pub const FuncGen = struct { // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. - const llvm_u32 = self.context.intType(32); + const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const llvm_index = llvm_u32.constInt(@intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); @@ -10268,11 +10288,14 @@ pub const FuncGen = struct { } } - fn getIntrinsic(fg: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value { + fn getIntrinsic(fg: *FuncGen, name: []const u8, types: []const Builder.Type) Allocator.Error!*llvm.Value { + const o = fg.dg.object; const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); - const o = fg.dg.object; - return o.llvm_module.getIntrinsicDeclaration(id, types.ptr, types.len); + const llvm_types = try o.gpa.alloc(*llvm.Type, types.len); + defer o.gpa.free(llvm_types); + for (llvm_types, types) |*llvm_type, ty| llvm_type.* = ty.toLlvm(&o.builder); + return o.llvm_module.getIntrinsicDeclaration(id, llvm_types.ptr, llvm_types.len); } /// Load a by-ref type by constructing a new alloca and performing a memcpy. 
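
The getIntrinsic hunk above captures the pattern this series applies at each LLVM boundary: code carries cheap Builder.Type handles and only materializes *llvm.Type pointers in a short-lived scratch slice right before the C API call. What follows is a minimal, self-contained sketch of that shape and is not part of the patch; Handle, Resolved, useResolved, and foreignCall are illustrative stand-ins invented for this note rather than the compiler's real API.

const std = @import("std");

// Illustrative stand-ins (not the real Builder/LLVM types): a cheap handle
// enum plus the table it resolves into, mirroring Builder.Type -> *llvm.Type.
const Handle = enum(u8) { i8, i16, i32 };
const Resolved = struct { bits: u16 };

var table = [_]Resolved{ .{ .bits = 8 }, .{ .bits = 16 }, .{ .bits = 32 } };

fn toResolved(h: Handle) *Resolved {
    return &table[@intFromEnum(h)];
}

// Same shape as the getIntrinsic hunk: allocate a temporary slice of resolved
// pointers, convert each handle, hand the slice to the foreign call, free it.
fn useResolved(gpa: std.mem.Allocator, handles: []const Handle) !u32 {
    const resolved = try gpa.alloc(*Resolved, handles.len);
    defer gpa.free(resolved);
    for (resolved, handles) |*out, h| out.* = toResolved(h);
    return foreignCall(resolved);
}

// Stand-in for the C API call that only understands resolved pointers.
fn foreignCall(tys: []const *Resolved) u32 {
    var sum: u32 = 0;
    for (tys) |t| sum += t.bits;
    return sum;
}

test "handles resolve only at the boundary" {
    const total = try useResolved(std.testing.allocator, &.{ .i32, .i8 });
    try std.testing.expectEqual(@as(u32, 40), total);
}

The alloc/convert/free shape here is the same one getIntrinsic now uses with o.gpa and ty.toLlvm(&o.builder) in the hunk above.
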
@@ -10287,8 +10310,8 @@ pub const FuncGen = struct { const mod = o.module; const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); - const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); + const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align); + const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const size_bytes = pointee_type.abiSize(mod); _ = fg.builder.buildMemCpy( result_ptr, @@ -10317,7 +10340,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); + const index_u32 = Builder.Type.i32.toLlvm(&o.builder).constInt(@intFromEnum(info.flags.vector_index), .False); const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); @@ -10339,7 +10362,7 @@ pub const FuncGen = struct { return llvm_inst; } - const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8); + const int_elem_ty = (try o.builder.intType(@intCast(info.packed_offset.host_size * 8))).toLlvm(&o.builder); const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); @@ -10351,9 +10374,9 @@ pub const FuncGen = struct { if (isByRef(elem_ty, mod)) { const result_align = elem_ty.abiAlignment(mod); - const result_ptr = self.buildAlloca(elem_llvm_ty, result_align); + const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align); - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); const store_inst = self.builder.buildStore(truncated_int, result_ptr); store_inst.setAlignment(result_align); @@ -10361,13 +10384,13 @@ pub const FuncGen = struct { } if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } if (elem_ty.isPtrAtRuntime(mod)) { - const same_size_int = self.context.intType(elem_bits); + const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); } @@ -10394,7 +10417,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); + const index_u32 = Builder.Type.i32.toLlvm(&o.builder).constInt(@intFromEnum(info.flags.vector_index), .False); const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); @@ -10412,7 +10435,7 @@ pub const FuncGen = struct { } if (info.packed_offset.host_size != 0) { - const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8); + const int_elem_ty = 
(try o.builder.intType(@intCast(info.packed_offset.host_size * 8))).toLlvm(&o.builder); const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); @@ -10422,7 +10445,7 @@ pub const FuncGen = struct { const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store - const value_bits_type = self.context.intType(elem_bits); + const value_bits_type = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); const value_bits = if (elem_ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(elem, value_bits_type, "") else @@ -10458,20 +10481,19 @@ pub const FuncGen = struct { ptr_alignment, elem, elem_ty.abiAlignment(mod), - self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False), + (try o.lowerType(Type.usize)).toLlvm(&o.builder).constInt(size_bytes, .False), info.flags.is_volatile, ); } - fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void { + fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; const o = fg.dg.object; - const target = o.module.getTarget(); - const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); + const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const zero = usize_llvm_ty.constInt(0, .False); const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False); const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, ""); - _ = valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); + _ = try valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); } fn valgrindClientRequest( @@ -10483,18 +10505,19 @@ pub const FuncGen = struct { a3: *llvm.Value, a4: *llvm.Value, a5: *llvm.Value, - ) *llvm.Value { + ) Allocator.Error!*llvm.Value { const o = fg.dg.object; const mod = o.module; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; - const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); + const usize_ty = try o.lowerType(Type.usize); + const usize_llvm_ty = usize_ty.toLlvm(&o.builder); const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); const array_llvm_ty = usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { - const array_ptr = fg.buildAlloca(array_llvm_ty, usize_alignment); + const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment); fg.valgrind_client_request_array = array_ptr; break :a array_ptr; }; @@ -10540,10 +10563,9 @@ pub const FuncGen = struct { else => unreachable, }; + const fn_llvm_ty = (try o.builder.fnType(usize_ty, &(.{usize_ty} ** 2), .normal)).toLlvm(&o.builder); const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, ""); const args = [_]*llvm.Value{ array_ptr_as_usize, default_value }; - const param_types = [_]*llvm.Type{ usize_llvm_ty, usize_llvm_ty }; - const fn_llvm_ty = llvm.functionType(usize_llvm_ty, ¶m_types, args.len, .False); const asm_fn = llvm.getInlineAsm( fn_llvm_ty, arch_specific.template.ptr, @@ -11200,7 +11222,6 @@ const ParamTypeIterator = struct { llvm_index: u32, types_len: u32, types_buffer: [8]Builder.Type, - llvm_types_buffer: [8]*llvm.Type, byval_attr: bool, const Lowering = union(enum) { @@ -11298,7 +11319,6 @@ const ParamTypeIterator = struct { .integer => { 
it.types_len = 1; it.types_buffer[0] = .i64; - it.llvm_types_buffer[0] = it.types_buffer[0].toLlvm(&it.object.builder); return .multiple_llvm_types; }, .double_integer => return Lowering{ .i64_array = 2 }, @@ -11408,31 +11428,22 @@ const ParamTypeIterator = struct { } var types_index: u32 = 0; var types_buffer: [8]Builder.Type = undefined; - var llvm_types_buffer: [8]*llvm.Type = undefined; for (classes) |class| { switch (class) { .integer => { types_buffer[types_index] = .i64; - llvm_types_buffer[types_index] = - types_buffer[types_index].toLlvm(&it.object.builder); types_index += 1; }, .sse, .sseup => { types_buffer[types_index] = .double; - llvm_types_buffer[types_index] = - types_buffer[types_index].toLlvm(&it.object.builder); types_index += 1; }, .float => { types_buffer[types_index] = .float; - llvm_types_buffer[types_index] = - types_buffer[types_index].toLlvm(&it.object.builder); types_index += 1; }, .float_combine => { types_buffer[types_index] = try it.object.builder.vectorType(.normal, 2, .float); - llvm_types_buffer[types_index] = - types_buffer[types_index].toLlvm(&it.object.builder); types_index += 1; }, .x87 => { @@ -11457,7 +11468,6 @@ const ParamTypeIterator = struct { } it.types_len = types_index; it.types_buffer = types_buffer; - it.llvm_types_buffer = llvm_types_buffer; it.llvm_index += types_index; it.zig_index += 1; return .multiple_llvm_types; @@ -11472,7 +11482,6 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp .llvm_index = 0, .types_len = 0, .types_buffer = undefined, - .llvm_types_buffer = undefined, .byval_attr = false, }; } @@ -11740,51 +11749,6 @@ fn compilerRtIntBits(bits: u16) u16 { return bits; } -fn buildAllocaInner( - context: *llvm.Context, - builder: *llvm.Builder, - llvm_func: *llvm.Value, - di_scope_non_null: bool, - llvm_ty: *llvm.Type, - maybe_alignment: ?c_uint, - target: std.Target, -) *llvm.Value { - const address_space = llvmAllocaAddressSpace(target); - - const alloca = blk: { - const prev_block = builder.getInsertBlock(); - const prev_debug_location = builder.getCurrentDebugLocation2(); - defer { - builder.positionBuilderAtEnd(prev_block); - if (di_scope_non_null) { - builder.setCurrentDebugLocation2(prev_debug_location); - } - } - - const entry_block = llvm_func.getFirstBasicBlock().?; - if (entry_block.getFirstInstruction()) |first_inst| { - builder.positionBuilder(entry_block, first_inst); - } else { - builder.positionBuilderAtEnd(entry_block); - } - builder.clearCurrentDebugLocation(); - - break :blk builder.buildAllocaInAddressSpace(llvm_ty, @intFromEnum(address_space), ""); - }; - - if (maybe_alignment) |alignment| { - alloca.setAlignment(alignment); - } - - // The pointer returned from this function should have the generic address space, - // if this isn't the case then cast it to the generic address space. 
-    if (address_space != .default) {
-        return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
-    }
-
-    return alloca;
-}
-
 fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
     return @intFromBool(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
 }
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index 891450165d..91735c0fe0 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -1320,29 +1320,47 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void {
     try writer.writeByte('\n');
     for (self.functions.items) |function| {
         const global = self.globals.entries.get(@intFromEnum(function.global));
+        const item = self.type_items.items[@intFromEnum(global.value.type)];
+        const extra = self.typeExtraDataTrail(Type.Function, item.data);
+        const params: []const Type =
+            @ptrCast(self.type_extra.items[extra.end..][0..extra.data.params_len]);
         try writer.print(
-            \\{s} {}{}{}{}{<}@{}{>} {}{}{{
-            \\  ret {%}
-            \\}}
-            \\
+            \\{s} {}{}{}{}{} @{}(
         , .{
             if (function.body) |_| "define" else "declare",
             global.value.linkage,
             global.value.preemption,
             global.value.visibility,
             global.value.dll_storage_class,
-            global.value.type.fmt(self),
+            extra.data.ret.fmt(self),
             global.key.fmt(self),
-            global.value.type.fmt(self),
+        });
+        for (params, 0..) |param, index| {
+            if (index > 0) try writer.writeAll(", ");
+            try writer.print("{%} %{d}", .{ param.fmt(self), index });
+        }
+        switch (item.tag) {
+            .function => {},
+            .vararg_function => {
+                if (params.len > 0) try writer.writeAll(", ");
+                try writer.writeAll("...");
+            },
+            else => unreachable,
+        }
+        try writer.print(") {}{}", .{
             global.value.unnamed_addr,
             global.value.alignment,
-            self.typeExtraData(
-                Type.Function,
-                self.type_items.items[@intFromEnum(global.value.type)].data,
-            ).ret.fmt(self),
         });
+        if (function.body) |_| try writer.print(
+            \\{{
+            \\  ret {%}
+            \\}}
+            \\
+        , .{
+            extra.data.ret.fmt(self),
+        });
+        try writer.writeByte('\n');
     }
-    try writer.writeByte('\n');
 }
 
 inline fn useLibLlvm(self: *const Builder) bool {
--
cgit v1.2.3


From 2cb52235b91f7e4bf5a4ebf77a5008adfc30c8b9 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 8 Jul 2023 23:13:06 -0400
Subject: llvm: convert all calls to `constInt`

---
 src/codegen/llvm.zig | 636 ++++++++++++++++++-----------------
 src/codegen/llvm/Builder.zig | 646 ++++++++++++++++++++++++++++++++++++------
 src/codegen/llvm/bindings.zig | 6 +
 3 files changed, 834 insertions(+), 454 deletions(-)

(limited to 'src')

diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e7aaa05864..b4bde977dd 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -611,7 +611,7 @@ pub const Object = struct {
         };
         errdefer builder.llvm_context.dispose();
 
-        initializeLLVMTarget(options.target.cpu.arch);
+        builder.initializeLLVMTarget(options.target.cpu.arch);
 
         builder.llvm_module = llvm.Module.createWithName(options.root_name.ptr, builder.llvm_context);
         errdefer builder.llvm_module.dispose();
@@ -832,7 +832,7 @@ pub const Object = struct {
             const slice_fields = [_]*llvm.Value{
                 str_global,
-                llvm_usize_ty.toLlvm(&o.builder).constInt(name.len, .False),
+                (try o.builder.intConst(llvm_usize_ty, name.len)).toLlvm(&o.builder),
             };
             llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
         }
@@ -874,8 +874,8 @@ pub const Object = struct {
         // }
         const lhs = llvm_fn.getParam(0);
-        const rhs = lhs.typeOf().constInt(errors_len, .False);
-        const is_lt = builder.buildICmp(.ULT, lhs, rhs, "");
+        const
rhs = try object.builder.intConst(Builder.Type.err_int, errors_len); + const is_lt = builder.buildICmp(.ULT, lhs, rhs.toLlvm(&object.builder), ""); _ = builder.buildRet(is_lt); } @@ -3474,10 +3474,8 @@ pub const Object = struct { .@"unreachable", .generic_poison, => unreachable, // non-runtime values - .false, .true => { - const llvm_type = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); - }, + .false => return Builder.Constant.false.toLlvm(&o.builder), + .true => return Builder.Constant.true.toLlvm(&o.builder), }, .variable, .enum_literal, @@ -3503,9 +3501,9 @@ pub const Object = struct { return lowerBigInt(o, tv.ty, bigint); }, .err => |err| { - const llvm_ty = Builder.Type.err_int.toLlvm(&o.builder); const int = try mod.getErrorValue(err.name); - return llvm_ty.constInt(int, .False); + const llvm_int = try o.builder.intConst(Builder.Type.err_int, int); + return llvm_int.toLlvm(&o.builder); }, .error_union => |error_union| { const err_tv: TypedValue = switch (error_union.val) { @@ -3556,79 +3554,33 @@ pub const Object = struct { return o.context.constStruct(&fields_buf, llvm_field_count, .False); } }, - .enum_tag => { - const int_val = try tv.intFromEnum(mod); - - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, mod); - - const int_info = tv.ty.intInfo(mod); - const llvm_type = (try o.builder.intType(@intCast(int_info.bits))).toLlvm(&o.builder); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @as(c_uint, @intCast(bigint.limbs.len)), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .float => { - const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); - const llvm_i16 = Builder.Type.i16.toLlvm(&o.builder); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod))); - const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); - const int = llvm_i32.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod))); - const llvm_i64 = Builder.Type.i64.toLlvm(&o.builder); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80, mod); - const repr = std.math.break_f80(float); - const llvm_i80 = Builder.Type.i80.toLlvm(&o.builder); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod))); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. 
- if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = Builder.Type.i128.toLlvm(&o.builder).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } - }, + .enum_tag => |enum_tag| return o.lowerValue(.{ + .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), + }), + .float => return switch (tv.ty.floatBits(target)) { + 16 => int: { + const repr: i16 = @bitCast(tv.val.toFloat(f16, mod)); + break :int try o.builder.intConst(.i16, repr); + }, + 32 => int: { + const repr: i32 = @bitCast(tv.val.toFloat(f32, mod)); + break :int try o.builder.intConst(.i32, repr); + }, + 64 => int: { + const repr: i64 = @bitCast(tv.val.toFloat(f64, mod)); + break :int try o.builder.intConst(.i64, repr); + }, + 80 => int: { + const repr: i80 = @bitCast(tv.val.toFloat(f80, mod)); + break :int try o.builder.intConst(.i80, repr); + }, + 128 => int: { + const repr: i128 = @bitCast(tv.val.toFloat(f128, mod)); + break :int try o.builder.intConst(.i128, repr); + }, + else => unreachable, + }.toLlvm(&o.builder).constBitCast((try o.lowerType(tv.ty)).toLlvm(&o.builder)), .ptr => |ptr| { const ptr_tv: TypedValue = switch (ptr.len) { .none => tv, @@ -3660,11 +3612,7 @@ pub const Object = struct { comptime assert(optional_layout_version == 3); const payload_ty = tv.ty.optionalChild(mod); - const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); - const non_null_bit = switch (opt.val) { - .none => llvm_i8.constNull(), - else => llvm_i8.constInt(1, .False), - }; + const non_null_bit = (try o.builder.intConst(.i8, @intFromBool(opt.val != .none))).toLlvm(&o.builder); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } @@ -3761,10 +3709,9 @@ pub const Object = struct { const elem_ty = vector_type.child.toType(); const llvm_elems = try gpa.alloc(*llvm.Value, vector_type.len); defer gpa.free(llvm_elems); - const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); for (llvm_elems, 0..) |*llvm_elem, i| { llvm_elem.* = switch (aggregate.storage) { - .bytes => |bytes| llvm_i8.constInt(bytes[i], .False), + .bytes => |bytes| (try o.builder.intConst(.i8, bytes[i])).toLlvm(&o.builder), .elems => |elems| try o.lowerValue(.{ .ty = elem_ty, .val = elems[i].toValue(), @@ -3802,10 +3749,10 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); } const field_llvm_val = try o.lowerValue(.{ @@ -3824,8 +3771,8 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); + llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); } } @@ -3850,10 +3797,10 @@ pub const Object = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); + const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_int = (try o.builder.intConst(int_llvm_ty, 0)).toLlvm(&o.builder); var running_bits: u16 = 0; for (fields, 0..) |field, i| { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; @@ -3868,11 +3815,11 @@ pub const Object = struct { non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + const shift_rhs = (try o.builder.intConst(int_llvm_ty, running_bits)).toLlvm(&o.builder); // If the field is as large as the entire packed struct, this // zext would go from, e.g. i16 to i16. This is legal with // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty.toLlvm(&o.builder)); const shifted = extended_int_val.constShl(shift_rhs); running_int = running_int.constOr(shifted); running_bits += ty_bit_size; @@ -3899,10 +3846,10 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); + const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); } const field_llvm_val = try o.lowerValue(.{ @@ -3921,8 +3868,8 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = Builder.Type.i8.toLlvm(&o.builder).arrayType(@as(c_uint, @intCast(padding_len))); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); + llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); } } @@ -3985,7 +3932,7 @@ pub const Object = struct { const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p Builder.Type.i8.toLlvm(&o.builder).arrayType(padding_len).getUndef(); + break :p (try o.builder.arrayType(padding_len, .i8)).toLlvm(&o.builder).getUndef(); } const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); need_unnamed = need_unnamed or o.isUnnamedType(field_ty, field); @@ -3995,7 +3942,7 @@ pub const Object = struct { } const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); const fields: [2]*llvm.Value = .{ - field, Builder.Type.i8.toLlvm(&o.builder).arrayType(padding_len).getUndef(), + field, (try o.builder.arrayType(padding_len, .i8)).toLlvm(&o.builder).getUndef(), }; break :p o.context.constStruct(&fields, fields.len, .True); }; @@ -4020,7 +3967,7 @@ pub const Object = struct { fields = .{ payload, llvm_tag_value, undefined }; } if (layout.padding != 0) { - fields[2] = Builder.Type.i8.toLlvm(&o.builder).arrayType(layout.padding).getUndef(); + fields[2] = (try o.builder.arrayType(layout.padding, .i8)).toLlvm(&o.builder).getUndef(); fields_len = 3; } if (need_unnamed) { @@ -4048,27 +3995,8 @@ pub const Object = struct { } fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) Allocator.Error!*llvm.Value { - const mod = o.module; - const int_info = ty.intInfo(mod); - assert(int_info.bits != 0); - const llvm_type = (try o.builder.intType(@intCast(int_info.bits))).toLlvm(&o.builder); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @as(c_uint, @intCast(bigint.limbs.len)), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; + return (try o.builder.bigIntConst(try o.builder.intType(ty.intInfo(o.module).bits), bigint)) + .toLlvm(&o.builder); } const ParentPtr = struct { @@ -4106,10 +4034,9 @@ pub const Object = struct { } const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, payload_offset)).toLlvm(&o.builder), }; const eu_llvm_ty = (try o.lowerType(eu_ty)).toLlvm(&o.builder); return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); @@ -4127,11 +4054,9 @@ pub const Object = struct { return parent_llvm_ptr; } - const llvm_u32 = 
Builder.Type.i32.toLlvm(&o.builder); const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + } ** 2; const opt_llvm_ty = (try o.lowerType(opt_ty)).toLlvm(&o.builder); return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, @@ -4139,9 +4064,8 @@ pub const Object = struct { .elem => |elem_ptr| { const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), + (try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index)).toLlvm(&o.builder), }; const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); @@ -4152,7 +4076,6 @@ pub const Object = struct { const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); const field_index = @as(u32, @intCast(field_ptr.index)); - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); switch (parent_ty.zigTypeTag(mod)) { .Union => { if (parent_ty.containerLayout(mod) == .Packed) { @@ -4170,8 +4093,8 @@ pub const Object = struct { else @intFromBool(layout.tag_align >= layout.payload_align); const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, llvm_pl_index)).toLlvm(&o.builder), }; const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); @@ -4179,8 +4102,8 @@ pub const Object = struct { .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); + const llvm_usize = try o.lowerType(Type.usize); + const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize.toLlvm(&o.builder)); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; @@ -4190,7 +4113,7 @@ pub const Object = struct { } break :b b; }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); + const byte_offset = (try o.builder.intConst(llvm_usize, prev_bits / 8)).toLlvm(&o.builder); const field_addr = base_addr.constAdd(byte_offset); const final_llvm_ty = o.context.pointerType(0); return field_addr.constIntToPtr(final_llvm_ty); @@ -4199,21 +4122,22 @@ pub const Object = struct { const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, .False), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, llvm_field.index)).toLlvm(&o.builder), }; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } else { - const llvm_index = llvm_u32.constInt(@intFromBool(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; + const indices: [1]*llvm.Value = .{ + (try o.builder.intConst(.i32, @intFromBool(parent_ty.hasRuntimeBitsIgnoreComptime(mod)))).toLlvm(&o.builder), + }; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } }, .Pointer => { 
assert(parent_ty.isSlice(mod)); const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, field_index)).toLlvm(&o.builder), }; const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); @@ -4284,10 +4208,10 @@ pub const Object = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); const llvm_ptr_ty = (try o.lowerType(ptr_ty)).toLlvm(&o.builder); if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| { - return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); + return (try o.builder.intConst(llvm_usize, alignment)).toLlvm(&o.builder).constIntToPtr(llvm_ptr_ty); } // Note that these 0xaa values are appropriate even in release-optimized builds // because we need a well-defined value that is not null, and LLVM does not @@ -4295,13 +4219,13 @@ pub const Object = struct { // instruction is followed by a `wrap_optional`, it will return this value // verbatim, and the result should test as non-null. const target = mod.getTarget(); - const int = switch (target.ptrBitWidth()) { - 16 => llvm_usize.constInt(0xaaaa, .False), - 32 => llvm_usize.constInt(0xaaaaaaaa, .False), - 64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False), + const int = try o.builder.intConst(llvm_usize, @as(u64, switch (target.ptrBitWidth()) { + 16 => 0xaaaa, + 32 => 0xaaaaaaaa, + 64 => 0xaaaaaaaa_aaaaaaaa, else => unreachable, - }; - return int.constIntToPtr(llvm_ptr_ty); + })); + return int.toLlvm(&o.builder).constIntToPtr(llvm_ptr_ty); } fn addAttr(o: *Object, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void { @@ -5118,11 +5042,11 @@ pub const FuncGen = struct { llvm_arg = store_inst; } - const float_ty = (try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?)).toLlvm(&o.builder); - const array_llvm_ty = float_ty.arrayType(count); + const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); + const array_ty = try o.builder.arrayType(count, float_ty); const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); + const load_inst = self.builder.buildLoad(array_ty.toLlvm(&o.builder), llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); }, @@ -5138,9 +5062,9 @@ pub const FuncGen = struct { llvm_arg = store_inst; } - const array_llvm_ty = (try o.builder.intType(@intCast(elem_size))).toLlvm(&o.builder).arrayType(arr_len); + const array_ty = try o.builder.arrayType(arr_len, try o.builder.intType(@intCast(elem_size))); const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); + const load_inst = self.builder.buildLoad(array_ty.toLlvm(&o.builder), llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); }, @@ -5279,7 +5203,7 @@ pub const FuncGen = struct { }); const null_opt_addr_global = try o.getNullOptAddr(); const target = mod.getTarget(); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); // example: // 
call fastcc void @test2.panic( // ptr @builtin.panic_messages.integer_overflow__anon_987, ; msg.ptr @@ -5289,7 +5213,7 @@ pub const FuncGen = struct { // ) const args = [4]*llvm.Value{ msg_ptr, - llvm_usize.constInt(msg_len, .False), + (try o.builder.intConst(llvm_usize, msg_len)).toLlvm(&o.builder), fg.context.pointerType(0).constNull(), null_opt_addr_global, }; @@ -5327,8 +5251,8 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - _ = self.builder.buildRet(err_int.constInt(0, .False)); + const int = try o.builder.intConst(Builder.Type.err_int, 0); + _ = self.builder.buildRet(int.toLlvm(&o.builder)); } else { _ = self.builder.buildRetVoid(); } @@ -5375,8 +5299,8 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - _ = self.builder.buildRet(err_int.constInt(0, .False)); + const int = try o.builder.intConst(Builder.Type.err_int, 0); + _ = self.builder.buildRet(int.toLlvm(&o.builder)); } else { _ = self.builder.buildRetVoid(); } @@ -5531,22 +5455,22 @@ pub const FuncGen = struct { // of optionals that are not pointers. const is_by_ref = isByRef(scalar_ty, mod); const opt_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); - const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); - const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); - const llvm_i2 = (try o.builder.intType(2)).toLlvm(&o.builder); - const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2, ""); - const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2, ""); - const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, llvm_i2.constInt(1, .False), ""); + const lhs_non_null = try self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); + const rhs_non_null = try self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); + const llvm_i2 = try o.builder.intType(2); + const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2.toLlvm(&o.builder), ""); + const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2.toLlvm(&o.builder), ""); + const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, (try o.builder.intConst(llvm_i2, 1)).toLlvm(&o.builder), ""); const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, ""); const both_null_block = self.context.appendBasicBlock(self.llvm_func, "BothNull"); const mixed_block = self.context.appendBasicBlock(self.llvm_func, "Mixed"); const both_pl_block = self.context.appendBasicBlock(self.llvm_func, "BothNonNull"); const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block, 2); - const llvm_i2_00 = llvm_i2.constInt(0b00, .False); - const llvm_i2_11 = llvm_i2.constInt(0b11, .False); - llvm_switch.addCase(llvm_i2_00, both_null_block); - llvm_switch.addCase(llvm_i2_11, both_pl_block); + const llvm_i2_00 = try o.builder.intConst(llvm_i2, 0b00); + const llvm_i2_11 = try o.builder.intConst(llvm_i2, 0b11); + llvm_switch.addCase(llvm_i2_00.toLlvm(&o.builder), both_null_block); + llvm_switch.addCase(llvm_i2_11.toLlvm(&o.builder), both_pl_block); 
self.builder.positionBuilderAtEnd(both_null_block); _ = self.builder.buildBr(end_block); @@ -5567,9 +5491,8 @@ pub const FuncGen = struct { mixed_block, both_pl_block_end, }; - const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); - const llvm_i1_0 = llvm_i1.constInt(0, .False); - const llvm_i1_1 = llvm_i1.constInt(1, .False); + const llvm_i1_0 = Builder.Constant.false.toLlvm(&o.builder); + const llvm_i1_1 = Builder.Constant.true.toLlvm(&o.builder); const incoming_values: [3]*llvm.Value = .{ switch (op) { .eq => llvm_i1_1, @@ -5584,7 +5507,7 @@ pub const FuncGen = struct { payload_cmp, }; - const phi_node = self.builder.buildPhi(llvm_i1, ""); + const phi_node = self.builder.buildPhi(Builder.Type.i1.toLlvm(&o.builder), ""); comptime assert(incoming_values.len == incoming_blocks.len); phi_node.addIncoming( &incoming_values, @@ -5882,8 +5805,8 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); + const llvm_usize = try o.lowerType(Type.usize); + const len = (try o.builder.intConst(llvm_usize, array_ty.arrayLen(mod))).toLlvm(&o.builder); const slice_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5891,8 +5814,8 @@ pub const FuncGen = struct { return self.builder.buildInsertValue(partial, len, 1, ""); } const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), llvm_usize.constNull(), - }; + (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder), + } ** 2; const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, ""); const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, ""); @@ -6024,21 +5947,21 @@ pub const FuncGen = struct { fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) Allocator.Error!*llvm.Value { const o = fg.dg.object; const mod = o.module; - const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; - const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); - return fg.builder.buildMul(len, abi_size_llvm_val, ""); + const abi_size_llvm_val = try o.builder.intConst(llvm_usize, abi_size); + return fg.builder.buildMul(len, abi_size_llvm_val.toLlvm(&o.builder), ""); }, .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); + return (try o.builder.intConst(llvm_usize, array_ty.arrayLen(mod) * abi_size)).toLlvm(&o.builder); }, .Many, .C => unreachable, } @@ -6340,10 +6263,10 @@ pub const FuncGen = struct { if (field_offset == 0) { return field_ptr; } - const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); - const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, ""); - const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, 
llvm_usize_ty.constInt(field_offset, .False), ""); + const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize.toLlvm(&o.builder), ""); + const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, (try o.builder.intConst(llvm_usize, field_offset)).toLlvm(&o.builder), ""); return self.builder.buildIntToPtr(base_ptr_int, res_ty, ""); } @@ -6919,12 +6842,11 @@ pub const FuncGen = struct { self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - const llvm_i8 = Builder.Type.i8.toLlvm(&o.builder); - return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), ""); + return self.builder.buildICmp(pred, loaded, (try o.builder.intConst(.i8, 0)).toLlvm(&o.builder), ""); } const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); - const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref); + const non_null_bit = try self.optIsNonNull(optional_llvm_ty, operand, is_by_ref); if (pred == .EQ) { return self.builder.buildNot(non_null_bit, ""); } else { @@ -6949,12 +6871,12 @@ pub const FuncGen = struct { const zero = err_set_ty.constNull(); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); - switch (op) { - .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 - .NE => return llvm_i1.constInt(0, .False), // 0 != 0 + const val: Builder.Constant = switch (op) { + .EQ => .true, // 0 == 0 + .NE => .false, // 0 != 0 else => unreachable, - } + }; + return val.toLlvm(&o.builder); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -7007,7 +6929,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - const non_null_bit = Builder.Type.i8.toLlvm(&o.builder).constInt(1, .False); + const non_null_bit = (try o.builder.intConst(.i8, 1)).toLlvm(&o.builder); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
_ = self.builder.buildStore(non_null_bit, operand); @@ -7101,11 +7023,10 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const err_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); if (operand_is_ptr) { return operand; } else { - return err_llvm_ty.constInt(0, .False); + return (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); } } @@ -7193,7 +7114,7 @@ pub const FuncGen = struct { const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.typeOf(ty_op.operand); - const non_null_bit = Builder.Type.i8.toLlvm(&o.builder).constInt(1, .False); + const non_null_bit = (try o.builder.intConst(.i8, 1)).toLlvm(&o.builder); comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); @@ -7278,22 +7199,24 @@ pub const FuncGen = struct { } fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.size", &.{.i32}); - const args: [1]*llvm.Value = .{llvm_u32.constInt(index, .False)}; + const args: [1]*llvm.Value = .{ + (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), + }; return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); } fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; const operand = try self.resolveInst(pl_op.operand); - const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32}); const args: [2]*llvm.Value = .{ - llvm_u32.constInt(index, .False), + (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), operand, }; return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); @@ -7571,24 +7494,23 @@ pub const FuncGen = struct { return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } if (scalar_ty.isSignedInt(mod)) { - const inst_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); + const inst_llvm_ty = try o.lowerType(inst_ty); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); - @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); + @memset(shifts, (try o.builder.intConst(try o.lowerType(scalar_ty), scalar_bit_size_minus_one)).toLlvm(&o.builder)); break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); + } else (try o.builder.intConst(inst_llvm_ty, scalar_bit_size_minus_one)).toLlvm(&o.builder); const div = self.builder.buildSDiv(lhs, rhs, ""); const rem = self.builder.buildSRem(lhs, rhs, ""); const div_sign = self.builder.buildXor(lhs, rhs, ""); const 
div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); - const zero = inst_llvm_ty.constNull(); + const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero, ""); return self.builder.buildNSWAdd(div, correction, ""); @@ -7637,14 +7559,14 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const inst_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); + const inst_llvm_ty = try o.lowerType(inst_ty); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs }); const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs }); - const zero = inst_llvm_ty.constNull(); + const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); return self.builder.buildSelect(ltz, c, a, ""); } @@ -7652,20 +7574,19 @@ pub const FuncGen = struct { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); - @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); + @memset(shifts, (try o.builder.intConst(try o.lowerType(scalar_ty), scalar_bit_size_minus_one)).toLlvm(&o.builder)); break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); + } else (try o.builder.intConst(inst_llvm_ty, scalar_bit_size_minus_one)).toLlvm(&o.builder); const rem = self.builder.buildSRem(lhs, rhs, ""); const div_sign = self.builder.buildXor(lhs, rhs, ""); const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, ""); - const zero = inst_llvm_ty.constNull(); + const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero, ""); return self.builder.buildNSWAdd(rem, correction, ""); @@ -7789,14 +7710,14 @@ pub const FuncGen = struct { result_vector: *llvm.Value, vector_len: usize, ) !*llvm.Value { + const o = self.dg.object; const args_len = @as(c_uint, @intCast(args_vectors.len)); - const llvm_i32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); assert(args_len <= 3); var i: usize = 0; var result = result_vector; while (i < vector_len) : (i += 1) { - const index_i32 = llvm_i32.constInt(i, .False); + const index_i32 = (try o.builder.intConst(.i32, i)).toLlvm(&o.builder); var args: [3]*llvm.Value = undefined; for (args_vectors, 0..) |arg_vector, k| { @@ -7882,7 +7803,7 @@ pub const FuncGen = struct { .i32, ); - const zero = Builder.Type.i32.toLlvm(&o.builder).constInt(0, .False); + const zero = (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder); const int_pred: llvm.IntPredicate = switch (pred) { .eq => .EQ, .neq => .NE, @@ -7973,17 +7894,17 @@ pub const FuncGen = struct { .neg => { // In this case we can generate a softfloat negation by XORing the // bits with a constant. 
- const int_llvm_ty = (try o.builder.intType(@intCast(float_bits))).toLlvm(&o.builder); - const one = int_llvm_ty.constInt(1, .False); - const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); - const sign_mask = one.constShl(shift_amt); + const int_ty = try o.builder.intType(@intCast(float_bits)); + const one = (try o.builder.intConst(int_ty, 1)).toLlvm(&o.builder); + const shift_amt = try o.builder.intConst(int_ty, float_bits - 1); + const sign_mask = one.constShl(shift_amt.toLlvm(&o.builder)); const result = if (ty.zigTypeTag(mod) == .Vector) blk: { const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, ""); - const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod)); - const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); + const cast_ty = try o.builder.vectorType(.normal, ty.vectorLen(mod), int_ty); + const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty.toLlvm(&o.builder), ""); break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); } else blk: { - const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, ""); + const bitcasted_operand = self.builder.buildBitCast(params[0], int_ty.toLlvm(&o.builder), ""); break :blk self.builder.buildXor(bitcasted_operand, sign_mask, ""); }; return self.builder.buildBitCast(result, llvm_ty.toLlvm(&o.builder), ""); @@ -8191,9 +8112,9 @@ pub const FuncGen = struct { // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates. - const lhs_scalar_llvm_ty = (try o.lowerType(lhs_scalar_ty)).toLlvm(&o.builder); - const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); - const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); + const lhs_scalar_llvm_ty = try o.lowerType(lhs_scalar_ty); + const bits = (try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits)).toLlvm(&o.builder); + const lhs_max = (try o.builder.intConst(lhs_scalar_llvm_ty, -1)).toLlvm(&o.builder); if (rhs_ty.zigTypeTag(mod) == .Vector) { const vec_len = rhs_ty.vectorLen(mod); const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); @@ -8382,17 +8303,19 @@ pub const FuncGen = struct { } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); - const zero = llvm_usize.constNull(); + const llvm_usize = try o.lowerType(Type.usize); + const zero = try o.builder.intConst(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = llvm_usize.constInt(i, .False); - const index_u32 = llvm_u32.constInt(i, .False); - const indexes: [2]*llvm.Value = .{ zero, index_usize }; + const index_usize = try o.builder.intConst(llvm_usize, i); + const index_u32 = try o.builder.intConst(.i32, i); + const indexes: [2]*llvm.Value = .{ + zero.toLlvm(&o.builder), + index_usize.toLlvm(&o.builder), + }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_dest_ty, array_ptr, &indexes, indexes.len, ""); - const elem = self.builder.buildExtractElement(operand, index_u32, ""); + const elem = self.builder.buildExtractElement(operand, index_u32.toLlvm(&o.builder), ""); _ = self.builder.buildStore(elem, elem_ptr); } } @@ -8416,19 +8339,21 @@ pub const FuncGen = struct { // a simple bitcast will not work, and we fall back to extractelement. 
const array_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); - const zero = llvm_usize.constNull(); + const llvm_usize = try o.lowerType(Type.usize); + const zero = try o.builder.intConst(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); var vector = llvm_vector_ty.getUndef(); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = llvm_usize.constInt(i, .False); - const index_u32 = llvm_u32.constInt(i, .False); - const indexes: [2]*llvm.Value = .{ zero, index_usize }; + const index_usize = try o.builder.intConst(llvm_usize, i); + const index_u32 = try o.builder.intConst(.i32, i); + const indexes: [2]*llvm.Value = .{ + zero.toLlvm(&o.builder), + index_usize.toLlvm(&o.builder), + }; const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indexes, indexes.len, ""); const elem = self.builder.buildLoad(elem_llvm_ty, elem_ptr, ""); - vector = self.builder.buildInsertElement(vector, elem, index_u32, ""); + vector = self.builder.buildInsertElement(vector, elem, index_u32.toLlvm(&o.builder), ""); } return vector; @@ -8563,14 +8488,13 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. - const u8_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const fill_byte = if (safety) - u8_llvm_ty.constInt(0xaa, .False) + (try o.builder.intConst(.i8, 0xaa)).toLlvm(&o.builder) else - u8_llvm_ty.getUndef(); + Builder.Type.i8.toLlvm(&o.builder).getUndef(); const operand_size = operand_ty.abiSize(mod); - const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const len = usize_llvm_ty.constInt(operand_size, .False); + const usize_ty = try o.lowerType(Type.usize); + const len = (try o.builder.intConst(usize_ty, operand_size)).toLlvm(&o.builder); const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); if (safety and mod.comp.bin_file.options.valgrind) { @@ -8855,7 +8779,6 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); const dest_ptr_align = ptr_ty.ptrAlignment(mod); - const u8_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); @@ -8873,9 +8796,9 @@ pub const FuncGen = struct { // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. 
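 // The 0xaa pattern matches Zig's convention for undefined memory in safe builds, making stray reads easy to spot.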
const fill_byte = if (safety) - u8_llvm_ty.constInt(0xaa, .False) + (try o.builder.intConst(.i8, 0xaa)).toLlvm(&o.builder) else - u8_llvm_ty.getUndef(); + Builder.Type.i8.toLlvm(&o.builder).getUndef(); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); @@ -8946,10 +8869,10 @@ pub const FuncGen = struct { const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody"); const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); - const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const usize_ty = try o.lowerType(Type.usize); const len = switch (ptr_ty.ptrSize(mod)) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), + .One => (try o.builder.intConst(usize_ty, ptr_ty.childType(mod).arrayLen(mod))).toLlvm(&o.builder), .Many, .C => unreachable, }; const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); @@ -8971,7 +8894,7 @@ pub const FuncGen = struct { it_ptr_alignment, value, elem_abi_alignment, - llvm_usize_ty.constInt(elem_abi_size, .False), + (try o.builder.intConst(usize_ty, elem_abi_size)).toLlvm(&o.builder), is_volatile, ); } else { @@ -8979,7 +8902,9 @@ pub const FuncGen = struct { store_inst.setAlignment(it_ptr_alignment); store_inst.setVolatile(llvm.Bool.fromBool(is_volatile)); } - const one_gep = [_]*llvm.Value{llvm_usize_ty.constInt(1, .False)}; + const one_gep = [_]*llvm.Value{ + (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), + }; const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, ""); _ = self.builder.buildBr(loop_block); @@ -9194,24 +9119,20 @@ pub const FuncGen = struct { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap const scalar_ty = try o.builder.intType(@intCast(bits + 8)); - const scalar_llvm_ty = scalar_ty.toLlvm(&o.builder); if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(mod); operand_llvm_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); defer self.gpa.free(shifts); - - for (shifts) |*elem| { - elem.* = scalar_llvm_ty.constInt(8, .False); - } + @memset(shifts, (try o.builder.intConst(scalar_ty, 8)).toLlvm(&o.builder)); const shift_vec = llvm.constVector(shifts.ptr, vec_len); const extended = self.builder.buildZExt(operand, operand_llvm_ty.toLlvm(&o.builder), ""); operand = self.builder.buildShl(extended, shift_vec, ""); } else { - const extended = self.builder.buildZExt(operand, scalar_llvm_ty, ""); - operand = self.builder.buildShl(extended, scalar_llvm_ty.constInt(8, .False), ""); + const extended = self.builder.buildZExt(operand, scalar_ty.toLlvm(&o.builder), ""); + operand = self.builder.buildShl(extended, (try o.builder.intConst(scalar_ty, 8)).toLlvm(&o.builder), ""); operand_llvm_ty = scalar_ty; } bits = bits + 8; @@ -9263,14 +9184,14 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(end_block); - const llvm_type = Builder.Type.i1.toLlvm(&o.builder); const incoming_values: [2]*llvm.Value = .{ - llvm_type.constInt(1, .False), llvm_type.constInt(0, .False), + Builder.Constant.true.toLlvm(&o.builder), + Builder.Constant.false.toLlvm(&o.builder), }; const incoming_blocks: [2]*llvm.BasicBlock = .{ valid_block, invalid_block, }; - 
const phi_node = self.builder.buildPhi(llvm_type, ""); + const phi_node = self.builder.buildPhi(Builder.Type.i1.toLlvm(&o.builder), ""); phi_node.addIncoming(&incoming_values, &incoming_blocks, 2); return phi_node; } @@ -9346,10 +9267,10 @@ pub const FuncGen = struct { switch_instr.addCase(this_tag_int_value, named_block); } self.builder.positionBuilderAtEnd(named_block); - _ = self.builder.buildRet(Builder.Type.i1.toLlvm(&o.builder).constInt(1, .False)); + _ = self.builder.buildRet(Builder.Constant.true.toLlvm(&o.builder)); self.builder.positionBuilderAtEnd(unnamed_block); - _ = self.builder.buildRet(Builder.Type.i1.toLlvm(&o.builder).constInt(0, .False)); + _ = self.builder.buildRet(Builder.Constant.false.toLlvm(&o.builder)); try o.builder.llvm_globals.append(self.gpa, fn_val); _ = try o.builder.addGlobal(llvm_fn_name, global); @@ -9384,7 +9305,7 @@ pub const FuncGen = struct { const slice_ty = Type.slice_const_u8_sentinel_0; const ret_ty = try o.lowerType(slice_ty); const llvm_ret_ty = ret_ty.toLlvm(&o.builder); - const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const usize_ty = try o.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); const fn_type = try o.builder.fnType(ret_ty, &.{ @@ -9421,9 +9342,9 @@ pub const FuncGen = struct { const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); - const array_ptr_indices = [_]*llvm.Value{ - usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), - }; + const array_ptr_indices: [2]*llvm.Value = .{ + (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), + } ** 2; for (enum_type.names, 0..) |name_ip, field_index_usize| { const field_index = @as(u32, @intCast(field_index_usize)); @@ -9439,7 +9360,7 @@ pub const FuncGen = struct { const slice_fields = [_]*llvm.Value{ str_init_llvm_ty.constInBoundsGEP(str_global, &array_ptr_indices, array_ptr_indices.len), - usize_llvm_ty.constInt(name.len, .False), + (try o.builder.intConst(usize_ty, name.len)).toLlvm(&o.builder), }; const slice_init = llvm_ret_ty.constNamedStruct(&slice_fields, slice_fields.len); const slice_global = o.llvm_module.addGlobal(slice_init.typeOf(), ""); @@ -9555,16 +9476,14 @@ pub const FuncGen = struct { const values = try self.gpa.alloc(*llvm.Value, mask_len); defer self.gpa.free(values); - const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); - for (values, 0..) 
|*val, i| { const elem = try mask.elemValue(mod, i); if (elem.isUndef(mod)) { - val.* = llvm_i32.getUndef(); + val.* = Builder.Type.i32.toLlvm(&o.builder).getUndef(); } else { const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); - val.* = llvm_i32.constInt(unsigned, .False); + val.* = (try o.builder.intConst(.i32, unsigned)).toLlvm(&o.builder); } } @@ -9592,13 +9511,13 @@ pub const FuncGen = struct { accum_init: *llvm.Value, ) !*llvm.Value { const o = self.dg.object; - const llvm_usize_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False); + const usize_ty = try o.lowerType(Type.usize); + const llvm_vector_len = try o.builder.intConst(usize_ty, vector_len); const llvm_result_ty = accum_init.typeOf(); // Allocate and initialize our mutable variables - const i_ptr = try self.buildAlloca(llvm_usize_ty, null); - _ = self.builder.buildStore(llvm_usize_ty.constInt(0, .False), i_ptr); + const i_ptr = try self.buildAlloca(usize_ty.toLlvm(&o.builder), null); + _ = self.builder.buildStore((try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), i_ptr); const accum_ptr = try self.buildAlloca(llvm_result_ty, null); _ = self.builder.buildStore(accum_init, accum_ptr); @@ -9610,8 +9529,8 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(loop); // while (i < vec.len) - const i = self.builder.buildLoad(llvm_usize_ty, i_ptr, ""); - const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len, ""); + const i = self.builder.buildLoad(usize_ty.toLlvm(&o.builder), i_ptr, ""); + const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len.toLlvm(&o.builder), ""); const loop_then = self.context.appendBasicBlock(self.llvm_func, "ReduceLoopThen"); _ = self.builder.buildCondBr(cond, loop_then, loop_exit); @@ -9627,7 +9546,7 @@ pub const FuncGen = struct { _ = self.builder.buildStore(new_accum, accum_ptr); // i += 1 - const new_i = self.builder.buildAdd(i, llvm_usize_ty.constInt(1, .False), ""); + const new_i = self.builder.buildAdd(i, (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), ""); _ = self.builder.buildStore(new_i, i_ptr); _ = self.builder.buildBr(loop); } @@ -9731,13 +9650,11 @@ pub const FuncGen = struct { switch (result_ty.zigTypeTag(mod)) { .Vector => { - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); - var vector = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - const index_u32 = llvm_u32.constInt(i, .False); + const index_u32 = try o.builder.intConst(.i32, i); const llvm_elem = try self.resolveInst(elem); - vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, ""); + vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32.toLlvm(&o.builder), ""); } return vector; }, @@ -9746,10 +9663,10 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); + const int_ty = try o.builder.intType(@intCast(big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_int = (try o.builder.intConst(int_ty, 0)).toLlvm(&o.builder); var running_bits: u16 = 0; for (elements, 0..) 
|elem, i| { const field = fields[i]; @@ -9762,12 +9679,12 @@ pub const FuncGen = struct { self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + const shift_rhs = try o.builder.intConst(int_ty, running_bits); // If the field is as large as the entire packed struct, this // zext would go from, e.g. i16 to i16. This is legal with // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, ""); - const shifted = self.builder.buildShl(extended_int_val, shift_rhs, ""); + const extended_int_val = self.builder.buildZExtOrBitCast(small_int_val, int_ty.toLlvm(&o.builder), ""); + const shifted = self.builder.buildShl(extended_int_val, shift_rhs.toLlvm(&o.builder), ""); running_int = self.builder.buildOr(running_int, shifted, ""); running_bits += ty_bit_size; } @@ -9775,18 +9692,20 @@ pub const FuncGen = struct { } if (isByRef(result_ty, mod)) { - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); - var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; + var indices: [2]*llvm.Value = .{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + undefined, + }; for (elements, 0..) |elem, i| { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmField(result_ty, i, mod).?.index; - indices[1] = llvm_u32.constInt(llvm_i, .False); + indices[1] = (try o.builder.intConst(.i32, llvm_i)).toLlvm(&o.builder); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const field_ptr_ty = try mod.ptrType(.{ .child = self.typeOf(elem).toIntern(), @@ -9815,7 +9734,7 @@ pub const FuncGen = struct { .Array => { assert(isByRef(result_ty, mod)); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const usize_ty = try o.lowerType(Type.usize); const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); @@ -9825,8 +9744,8 @@ pub const FuncGen = struct { for (elements, 0..) 
|elem, i| { const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), - llvm_usize.constInt(@as(c_uint, @intCast(i)), .False), + (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), + (try o.builder.intConst(usize_ty, i)).toLlvm(&o.builder), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveInst(elem); @@ -9834,8 +9753,8 @@ pub const FuncGen = struct { } if (array_info.sentinel) |sent_val| { const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), - llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False), + (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), + (try o.builder.intConst(usize_ty, array_info.len)).toLlvm(&o.builder), }; const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const llvm_elem = try self.resolveValue(.{ @@ -9858,7 +9777,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); - const union_llvm_ty = (try o.lowerType(union_ty)).toLlvm(&o.builder); + const union_llvm_ty = try o.lowerType(union_ty); const layout = union_ty.unionGetLayout(mod); const union_obj = mod.typeToUnion(union_ty).?; @@ -9889,14 +9808,14 @@ pub const FuncGen = struct { return null; } assert(!isByRef(union_ty, mod)); - return union_llvm_ty.constInt(tag_int, .False); + return (try o.builder.intConst(union_llvm_ty, tag_int)).toLlvm(&o.builder); } assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set // the fields appropriately. - const result_ptr = try self.buildAlloca(union_llvm_ty, layout.abi_align); + const result_ptr = try self.buildAlloca(union_llvm_ty.toLlvm(&o.builder), layout.abi_align); const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; @@ -9936,8 +9855,6 @@ pub const FuncGen = struct { // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. 
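 // With a zero-sized tag only the payload is stored; otherwise the payload and the tag are written through separate GEPs, with whichever of the two has the larger alignment placed first.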
- const index_type = Builder.Type.i32.toLlvm(&o.builder); - const field_ptr_ty = try mod.ptrType(.{ .child = field.ty.toIntern(), .flags = .{ @@ -9946,10 +9863,8 @@ pub const FuncGen = struct { }); if (layout.tag_size == 0) { const indices: [3]*llvm.Value = .{ - index_type.constNull(), - index_type.constNull(), - index_type.constNull(), - }; + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + } ** 3; const len: c_uint = if (field_size == layout.payload_size) 2 else 3; const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); @@ -9958,9 +9873,9 @@ pub const FuncGen = struct { { const indices: [3]*llvm.Value = .{ - index_type.constNull(), - index_type.constInt(@intFromBool(layout.tag_align >= layout.payload_align), .False), - index_type.constNull(), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, @intFromBool(layout.tag_align >= layout.payload_align))).toLlvm(&o.builder), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), }; const len: c_uint = if (field_size == layout.payload_size) 2 else 3; const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); @@ -9968,13 +9883,13 @@ pub const FuncGen = struct { } { const indices: [2]*llvm.Value = .{ - index_type.constNull(), - index_type.constInt(@intFromBool(layout.tag_align < layout.payload_align), .False), + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, @intFromBool(layout.tag_align < layout.payload_align))).toLlvm(&o.builder), }; const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); - const tag_llvm_ty = (try o.lowerType(union_obj.tag_ty)).toLlvm(&o.builder); - const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); - const store_inst = self.builder.buildStore(llvm_tag, field_ptr); + const tag_ty = try o.lowerType(union_obj.tag_ty); + const llvm_tag = try o.builder.intConst(tag_ty, tag_int); + const store_inst = self.builder.buildStore(llvm_tag.toLlvm(&o.builder), field_ptr); store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); } @@ -10031,12 +9946,11 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(prefetch.ptr); - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); const params = [_]*llvm.Value{ ptr, - llvm_u32.constInt(@intFromEnum(prefetch.rw), .False), - llvm_u32.constInt(prefetch.locality, .False), - llvm_u32.constInt(@intFromEnum(prefetch.cache), .False), + (try o.builder.intConst(.i32, @intFromEnum(prefetch.rw))).toLlvm(&o.builder), + (try o.builder.intConst(.i32, prefetch.locality)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, @intFromEnum(prefetch.cache))).toLlvm(&o.builder), }; _ = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); return null; @@ -10053,13 +9967,11 @@ pub const FuncGen = struct { } fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value { - const llvm_u32 = Builder.Type.i32.toLlvm(&self.dg.object.builder); - const llvm_fn_name = switch (dimension) { 0 => basename ++ ".x", 1 => basename ++ ".y", 2 => basename ++ ".z", - else => return llvm_u32.constInt(default, .False), + else => return (try self.dg.object.builder.intConst(.i32, default)).toLlvm(&self.dg.object.builder), }; const args: [0]*llvm.Value = .{}; @@ -10084,9 +9996,8 @@ pub const FuncGen = struct { const pl_op = 
self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); if (dimension >= 3) { - return llvm_u32.constInt(1, .False); + return (try o.builder.intConst(.i32, 1)).toLlvm(&o.builder); } // Fetch the dispatch pointer, which points to this structure: @@ -10099,7 +10010,9 @@ pub const FuncGen = struct { // Load the work_group_* member from the struct as u16. // Just treat the dispatch pointer as an array of u16 to keep things simple. const offset = 2 + dimension; - const index = [_]*llvm.Value{llvm_u32.constInt(offset, .False)}; + const index = [_]*llvm.Value{ + (try o.builder.intConst(.i32, offset)).toLlvm(&o.builder), + }; const llvm_u16 = Builder.Type.i16.toLlvm(&o.builder); const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, ""); const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, ""); @@ -10145,18 +10058,17 @@ pub const FuncGen = struct { opt_llvm_ty: *llvm.Type, opt_handle: *llvm.Value, is_by_ref: bool, - ) *llvm.Value { - const non_null_llvm_ty = Builder.Type.i8.toLlvm(&self.dg.object.builder); + ) Allocator.Error!*llvm.Value { const field = b: { if (is_by_ref) { const field_ptr = self.builder.buildStructGEP(opt_llvm_ty, opt_handle, 1, ""); - break :b self.builder.buildLoad(non_null_llvm_ty, field_ptr, ""); + break :b self.builder.buildLoad(Builder.Type.i8.toLlvm(&self.dg.object.builder), field_ptr, ""); } break :b self.builder.buildExtractValue(opt_handle, 1, ""); }; comptime assert(optional_layout_version == 3); - return self.builder.buildICmp(.NE, field, non_null_llvm_ty.constInt(0, .False), ""); + return self.builder.buildICmp(.NE, field, (try self.dg.object.builder.intConst(.i8, 0)).toLlvm(&self.dg.object.builder), ""); } /// Assumes the optional is not pointer-like and payload has bits. @@ -10254,9 +10166,9 @@ pub const FuncGen = struct { const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const llvm_index = llvm_usize.constInt(byte_offset, .False); - const indices: [1]*llvm.Value = .{llvm_index}; + const usize_ty = try o.lowerType(Type.usize); + const llvm_index = try o.builder.intConst(usize_ty, byte_offset); + const indices: [1]*llvm.Value = .{llvm_index.toLlvm(&o.builder)}; return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, ""); }, else => { @@ -10269,9 +10181,8 @@ pub const FuncGen = struct { // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. 
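 // @intFromBool picks index 1 (one past the single element) when the struct has runtime bits, and index 0 for a zero-bit struct.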
- const llvm_u32 = Builder.Type.i32.toLlvm(&o.builder); - const llvm_index = llvm_u32.constInt(@intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; + const llvm_index = try o.builder.intConst(.i32, @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod))); + const indices: [1]*llvm.Value = .{llvm_index.toLlvm(&o.builder)}; return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); } }, @@ -10311,14 +10222,14 @@ pub const FuncGen = struct { const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const usize_ty = try o.lowerType(Type.usize); const size_bytes = pointee_type.abiSize(mod); _ = fg.builder.buildMemCpy( result_ptr, result_align, ptr, ptr_alignment, - llvm_usize.constInt(size_bytes, .False), + (try o.builder.intConst(usize_ty, size_bytes)).toLlvm(&o.builder), is_volatile, ); return result_ptr; @@ -10340,15 +10251,15 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = Builder.Type.i32.toLlvm(&o.builder).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); + const index_u32 = try o.builder.intConst(.i32, @intFromEnum(info.flags.vector_index)); + const vec_elem_ty = try o.lowerType(elem_ty); + const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); + const loaded_vector = self.builder.buildLoad(vec_ty.toLlvm(&o.builder), ptr, ""); loaded_vector.setAlignment(ptr_alignment); loaded_vector.setVolatile(ptr_volatile); - return self.builder.buildExtractElement(loaded_vector, index_u32, ""); + return self.builder.buildExtractElement(loaded_vector, index_u32.toLlvm(&o.builder), ""); } if (info.packed_offset.host_size == 0) { @@ -10417,15 +10328,15 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = Builder.Type.i32.toLlvm(&o.builder).constInt(@intFromEnum(info.flags.vector_index), .False); - const vec_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); + const index_u32 = try o.builder.intConst(.i32, @intFromEnum(info.flags.vector_index)); + const vec_elem_ty = try o.lowerType(elem_ty); + const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); + const loaded_vector = self.builder.buildLoad(vec_ty.toLlvm(&o.builder), ptr, ""); loaded_vector.setAlignment(ptr_alignment); loaded_vector.setVolatile(ptr_volatile); - const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32, ""); + const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32.toLlvm(&o.builder), ""); const store_inst = self.builder.buildStore(modified_vector, ptr); assert(ordering == .NotAtomic); @@ -10481,7 +10392,7 @@ pub const FuncGen = struct { ptr_alignment, elem, elem_ty.abiAlignment(mod), - (try o.lowerType(Type.usize)).toLlvm(&o.builder).constInt(size_bytes, 
.False), + (try o.builder.intConst(try o.lowerType(Type.usize), size_bytes)).toLlvm(&o.builder), info.flags.is_volatile, ); } @@ -10489,10 +10400,10 @@ pub const FuncGen = struct { fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; const o = fg.dg.object; - const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const zero = usize_llvm_ty.constInt(0, .False); - const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False); - const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, ""); + const usize_ty = try o.lowerType(Type.usize); + const zero = (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder); + const req = (try o.builder.intConst(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED)).toLlvm(&o.builder); + const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_ty.toLlvm(&o.builder), ""); _ = try valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); } @@ -10511,21 +10422,20 @@ pub const FuncGen = struct { const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; - const usize_ty = try o.lowerType(Type.usize); - const usize_llvm_ty = usize_ty.toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); - const array_llvm_ty = usize_llvm_ty.arrayType(6); + const array_llvm_ty = (try o.builder.arrayType(6, llvm_usize)).toLlvm(&o.builder); const array_ptr = fg.valgrind_client_request_array orelse a: { const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment); fg.valgrind_client_request_array = array_ptr; break :a array_ptr; }; const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 }; - const zero = usize_llvm_ty.constInt(0, .False); + const zero = (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder); for (array_elements, 0..) 
|elem, i| { const indexes = [_]*llvm.Value{ - zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False), + zero, (try o.builder.intConst(llvm_usize, i)).toLlvm(&o.builder), }; const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, ""); const store_inst = fg.builder.buildStore(elem, elem_ptr); @@ -10563,8 +10473,8 @@ pub const FuncGen = struct { else => unreachable, }; - const fn_llvm_ty = (try o.builder.fnType(usize_ty, &(.{usize_ty} ** 2), .normal)).toLlvm(&o.builder); - const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, ""); + const fn_llvm_ty = (try o.builder.fnType(llvm_usize, &(.{llvm_usize} ** 2), .normal)).toLlvm(&o.builder); + const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, llvm_usize.toLlvm(&o.builder), ""); const args = [_]*llvm.Value{ array_ptr_as_usize, default_value }; const asm_fn = llvm.getInlineAsm( fn_llvm_ty, diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 91735c0fe0..95af18e726 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -6,6 +6,7 @@ llvm_module: *llvm.Module, di_builder: ?*llvm.DIBuilder = null, llvm_types: std.ArrayListUnmanaged(*llvm.Type) = .{}, llvm_globals: std.ArrayListUnmanaged(*llvm.Value) = .{}, +llvm_constants: std.ArrayListUnmanaged(*llvm.Value) = .{}, source_filename: String = .none, data_layout: String = .none, @@ -29,6 +30,11 @@ aliases: std.ArrayListUnmanaged(Alias) = .{}, objects: std.ArrayListUnmanaged(Object) = .{}, functions: std.ArrayListUnmanaged(Function) = .{}, +constant_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +constant_items: std.MultiArrayList(Constant.Item) = .{}, +constant_extra: std.ArrayListUnmanaged(u32) = .{}, +constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb) = .{}, + pub const String = enum(u32) { none = std.math.maxInt(u31), empty, @@ -612,10 +618,6 @@ pub const Global = struct { builder.llvm_globals.items[index].setValueName2(slice.ptr, slice.len); } }; - - fn deinit(self: *Global, _: Allocator) void { - self.* = undefined; - } }; pub const Alias = struct { @@ -642,7 +644,7 @@ pub const Object = struct { global: Global.Index, thread_local: ThreadLocal = .default, mutability: enum { global, constant } = .global, - init: void = {}, + init: Constant = .no_init, pub const Index = enum(u32) { _, @@ -664,10 +666,8 @@ pub const Object = struct { pub const Function = struct { global: Global.Index, body: ?void = null, - - fn deinit(self: *Function, _: Allocator) void { - self.* = undefined; - } + instructions: std.ArrayListUnmanaged(Instruction) = .{}, + blocks: std.ArrayListUnmanaged(Block) = .{}, pub const Index = enum(u32) { _, @@ -684,6 +684,130 @@ pub const Function = struct { return self.ptrConst(builder).global.toLlvm(builder); } }; + + pub const Instruction = struct { + tag: Tag, + + pub const Tag = enum { + arg, + block, + }; + + pub const Index = enum(u31) { _ }; + }; + + pub const Block = struct { + body: std.ArrayListUnmanaged(Instruction.Index) = .{}, + + pub const Index = enum(u31) { _ }; + }; + + pub fn deinit(self: *Function, gpa: Allocator) void { + self.instructions.deinit(gpa); + self.blocks.deinit(gpa); + self.* = undefined; + } +}; + +pub const Constant = enum(u32) { + false, + true, + none, + no_init = 1 << 31, + _, + + const first_global: Constant = @enumFromInt(1 << 30); + + pub const Tag = enum(u6) { + integer_positive, + integer_negative, + null, + none, + structure, + array, + vector, + zeroinitializer, + global, + undef, + poison, + 
blockaddress, + dso_local_equivalent, + no_cfi, + trunc, + zext, + sext, + fptrunc, + fpext, + fptoui, + fptosi, + uitofp, + sitofp, + ptrtoint, + inttoptr, + bitcast, + addrspacecast, + getelementptr, + icmp, + fcmp, + extractelement, + insertelement, + shufflevector, + add, + sub, + mul, + shl, + lshr, + ashr, + @"and", + @"or", + xor, + }; + + pub const Item = struct { + tag: Tag, + data: u32, + }; + + pub const Integer = packed struct(u64) { + type: Type, + limbs_len: u32, + + pub const limbs = @divExact(@bitSizeOf(Integer), @bitSizeOf(std.math.big.Limb)); + }; + + pub fn unwrap(self: Constant) union(enum) { + constant: u30, + global: Global.Index, + } { + return if (@intFromEnum(self) < @intFromEnum(first_global)) + .{ .constant = @intCast(@intFromEnum(self)) } + else + .{ .global = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_global)) }; + } + + pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value { + assert(builder.useLibLlvm()); + return switch (self.unwrap()) { + .constant => |constant| builder.llvm_constants.items[constant], + .global => |global| global.toLlvm(builder), + }; + } +}; + +pub const Value = enum(u32) { + _, + + const first_constant: Value = @enumFromInt(1 << 31); + + pub fn unwrap(self: Value) union(enum) { + instruction: Function.Instruction.Index, + constant: Constant, + } { + return if (@intFromEnum(self) < @intFromEnum(first_constant)) + .{ .instruction = @intFromEnum(self) } + else + .{ .constant = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_constant)) }; + } }; pub fn init(self: *Builder) Allocator.Error!void { @@ -711,11 +835,15 @@ pub fn init(self: *Builder) Allocator.Error!void { inline for (.{0}) |addr_space| assert(self.ptrTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); } + + assert(try self.intConst(.i1, 0) == .false); + assert(try self.intConst(.i1, 1) == .true); } pub fn deinit(self: *Builder) void { self.llvm_types.deinit(self.gpa); self.llvm_globals.deinit(self.gpa); + self.llvm_constants.deinit(self.gpa); self.string_map.deinit(self.gpa); self.string_bytes.deinit(self.gpa); @@ -731,11 +859,210 @@ pub fn deinit(self: *Builder) void { self.next_unique_global_id.deinit(self.gpa); self.aliases.deinit(self.gpa); self.objects.deinit(self.gpa); + for (self.functions.items) |*function| function.deinit(self.gpa); self.functions.deinit(self.gpa); + self.constant_map.deinit(self.gpa); + self.constant_items.deinit(self.gpa); + self.constant_extra.deinit(self.gpa); + self.constant_limbs.deinit(self.gpa); + self.* = undefined; } +pub fn initializeLLVMTarget(self: *const Builder, arch: std.Target.Cpu.Arch) void { + if (!self.useLibLlvm()) return; + switch (arch) { + .aarch64, .aarch64_be, .aarch64_32 => { + llvm.LLVMInitializeAArch64Target(); + llvm.LLVMInitializeAArch64TargetInfo(); + llvm.LLVMInitializeAArch64TargetMC(); + llvm.LLVMInitializeAArch64AsmPrinter(); + llvm.LLVMInitializeAArch64AsmParser(); + }, + .amdgcn => { + llvm.LLVMInitializeAMDGPUTarget(); + llvm.LLVMInitializeAMDGPUTargetInfo(); + llvm.LLVMInitializeAMDGPUTargetMC(); + llvm.LLVMInitializeAMDGPUAsmPrinter(); + llvm.LLVMInitializeAMDGPUAsmParser(); + }, + .thumb, .thumbeb, .arm, .armeb => { + llvm.LLVMInitializeARMTarget(); + llvm.LLVMInitializeARMTargetInfo(); + llvm.LLVMInitializeARMTargetMC(); + llvm.LLVMInitializeARMAsmPrinter(); + llvm.LLVMInitializeARMAsmParser(); + }, + .avr => { + llvm.LLVMInitializeAVRTarget(); + llvm.LLVMInitializeAVRTargetInfo(); + llvm.LLVMInitializeAVRTargetMC(); + llvm.LLVMInitializeAVRAsmPrinter(); + 
llvm.LLVMInitializeAVRAsmParser(); + }, + .bpfel, .bpfeb => { + llvm.LLVMInitializeBPFTarget(); + llvm.LLVMInitializeBPFTargetInfo(); + llvm.LLVMInitializeBPFTargetMC(); + llvm.LLVMInitializeBPFAsmPrinter(); + llvm.LLVMInitializeBPFAsmParser(); + }, + .hexagon => { + llvm.LLVMInitializeHexagonTarget(); + llvm.LLVMInitializeHexagonTargetInfo(); + llvm.LLVMInitializeHexagonTargetMC(); + llvm.LLVMInitializeHexagonAsmPrinter(); + llvm.LLVMInitializeHexagonAsmParser(); + }, + .lanai => { + llvm.LLVMInitializeLanaiTarget(); + llvm.LLVMInitializeLanaiTargetInfo(); + llvm.LLVMInitializeLanaiTargetMC(); + llvm.LLVMInitializeLanaiAsmPrinter(); + llvm.LLVMInitializeLanaiAsmParser(); + }, + .mips, .mipsel, .mips64, .mips64el => { + llvm.LLVMInitializeMipsTarget(); + llvm.LLVMInitializeMipsTargetInfo(); + llvm.LLVMInitializeMipsTargetMC(); + llvm.LLVMInitializeMipsAsmPrinter(); + llvm.LLVMInitializeMipsAsmParser(); + }, + .msp430 => { + llvm.LLVMInitializeMSP430Target(); + llvm.LLVMInitializeMSP430TargetInfo(); + llvm.LLVMInitializeMSP430TargetMC(); + llvm.LLVMInitializeMSP430AsmPrinter(); + llvm.LLVMInitializeMSP430AsmParser(); + }, + .nvptx, .nvptx64 => { + llvm.LLVMInitializeNVPTXTarget(); + llvm.LLVMInitializeNVPTXTargetInfo(); + llvm.LLVMInitializeNVPTXTargetMC(); + llvm.LLVMInitializeNVPTXAsmPrinter(); + // There is no LLVMInitializeNVPTXAsmParser function available. + }, + .powerpc, .powerpcle, .powerpc64, .powerpc64le => { + llvm.LLVMInitializePowerPCTarget(); + llvm.LLVMInitializePowerPCTargetInfo(); + llvm.LLVMInitializePowerPCTargetMC(); + llvm.LLVMInitializePowerPCAsmPrinter(); + llvm.LLVMInitializePowerPCAsmParser(); + }, + .riscv32, .riscv64 => { + llvm.LLVMInitializeRISCVTarget(); + llvm.LLVMInitializeRISCVTargetInfo(); + llvm.LLVMInitializeRISCVTargetMC(); + llvm.LLVMInitializeRISCVAsmPrinter(); + llvm.LLVMInitializeRISCVAsmParser(); + }, + .sparc, .sparc64, .sparcel => { + llvm.LLVMInitializeSparcTarget(); + llvm.LLVMInitializeSparcTargetInfo(); + llvm.LLVMInitializeSparcTargetMC(); + llvm.LLVMInitializeSparcAsmPrinter(); + llvm.LLVMInitializeSparcAsmParser(); + }, + .s390x => { + llvm.LLVMInitializeSystemZTarget(); + llvm.LLVMInitializeSystemZTargetInfo(); + llvm.LLVMInitializeSystemZTargetMC(); + llvm.LLVMInitializeSystemZAsmPrinter(); + llvm.LLVMInitializeSystemZAsmParser(); + }, + .wasm32, .wasm64 => { + llvm.LLVMInitializeWebAssemblyTarget(); + llvm.LLVMInitializeWebAssemblyTargetInfo(); + llvm.LLVMInitializeWebAssemblyTargetMC(); + llvm.LLVMInitializeWebAssemblyAsmPrinter(); + llvm.LLVMInitializeWebAssemblyAsmParser(); + }, + .x86, .x86_64 => { + llvm.LLVMInitializeX86Target(); + llvm.LLVMInitializeX86TargetInfo(); + llvm.LLVMInitializeX86TargetMC(); + llvm.LLVMInitializeX86AsmPrinter(); + llvm.LLVMInitializeX86AsmParser(); + }, + .xtensa => { + if (build_options.llvm_has_xtensa) { + llvm.LLVMInitializeXtensaTarget(); + llvm.LLVMInitializeXtensaTargetInfo(); + llvm.LLVMInitializeXtensaTargetMC(); + llvm.LLVMInitializeXtensaAsmPrinter(); + llvm.LLVMInitializeXtensaAsmParser(); + } + }, + .xcore => { + llvm.LLVMInitializeXCoreTarget(); + llvm.LLVMInitializeXCoreTargetInfo(); + llvm.LLVMInitializeXCoreTargetMC(); + llvm.LLVMInitializeXCoreAsmPrinter(); + // There is no LLVMInitializeXCoreAsmParser function. 
+ }, + .m68k => { + if (build_options.llvm_has_m68k) { + llvm.LLVMInitializeM68kTarget(); + llvm.LLVMInitializeM68kTargetInfo(); + llvm.LLVMInitializeM68kTargetMC(); + llvm.LLVMInitializeM68kAsmPrinter(); + llvm.LLVMInitializeM68kAsmParser(); + } + }, + .csky => { + if (build_options.llvm_has_csky) { + llvm.LLVMInitializeCSKYTarget(); + llvm.LLVMInitializeCSKYTargetInfo(); + llvm.LLVMInitializeCSKYTargetMC(); + // There is no LLVMInitializeCSKYAsmPrinter function. + llvm.LLVMInitializeCSKYAsmParser(); + } + }, + .ve => { + llvm.LLVMInitializeVETarget(); + llvm.LLVMInitializeVETargetInfo(); + llvm.LLVMInitializeVETargetMC(); + llvm.LLVMInitializeVEAsmPrinter(); + llvm.LLVMInitializeVEAsmParser(); + }, + .arc => { + if (build_options.llvm_has_arc) { + llvm.LLVMInitializeARCTarget(); + llvm.LLVMInitializeARCTargetInfo(); + llvm.LLVMInitializeARCTargetMC(); + llvm.LLVMInitializeARCAsmPrinter(); + // There is no LLVMInitializeARCAsmParser function. + } + }, + + // LLVM backends that have no initialization functions. + .tce, + .tcele, + .r600, + .le32, + .le64, + .amdil, + .amdil64, + .hsail, + .hsail64, + .shave, + .spir, + .spir64, + .kalimba, + .renderscript32, + .renderscript64, + .dxil, + .loongarch32, + .loongarch64, + => {}, + + .spu_2 => unreachable, // LLVM does not support this backend + .spirv32 => unreachable, // LLVM does not support this backend + .spirv64 => unreachable, // LLVM does not support this backend + } +} + pub fn string(self: *Builder, bytes: []const u8) Allocator.Error!String { try self.string_bytes.ensureUnusedCapacity(self.gpa, bytes.len + 1); try self.string_indices.ensureUnusedCapacity(self.gpa, 1); @@ -899,6 +1226,112 @@ pub fn getGlobal(self: *const Builder, name: String) ?Global.Index { return @enumFromInt(self.globals.getIndex(name) orelse return null); } +pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Constant { + var limbs: [ + switch (@typeInfo(@TypeOf(value))) { + .Int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits), + .ComptimeInt => std.math.big.int.calcLimbLen(value), + else => @compileError("intConst expected an integral value, got " ++ + @typeName(@TypeOf(value))), + } + ]std.math.big.Limb = undefined; + return self.bigIntConst(ty, std.math.big.int.Mutable.init(&limbs, value).toConst()); +} + +pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allocator.Error!Constant { + try self.constant_map.ensureUnusedCapacity(self.gpa, 1); + try self.constant_items.ensureUnusedCapacity(self.gpa, 1); + try self.constant_limbs.ensureUnusedCapacity(self.gpa, Constant.Integer.limbs + value.limbs.len); + if (self.useLibLlvm()) try self.llvm_constants.ensureUnusedCapacity(self.gpa, 1); + return self.bigIntConstAssumeCapacity(ty, value); +} + +pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { + if (self.source_filename != .none) try writer.print( + \\; ModuleID = '{s}' + \\source_filename = {"} + \\ + , .{ self.source_filename.toSlice(self).?, self.source_filename.fmt(self) }); + if (self.data_layout != .none) try writer.print( + \\target datalayout = {"} + \\ + , .{self.data_layout.fmt(self)}); + if (self.target_triple != .none) try writer.print( + \\target triple = {"} + \\ + , .{self.target_triple.fmt(self)}); + try writer.writeByte('\n'); + for (self.types.keys(), self.types.values()) |id, ty| try writer.print( + \\%{} = type {} + \\ + , .{ id.fmt(self), ty.fmt(self) }); + try writer.writeByte('\n'); + for (self.objects.items) |object| { + const global = 
self.globals.entries.get(@intFromEnum(object.global)); + try writer.print( + \\@{} ={}{}{}{}{}{}{}{} {s} {%}{,} + \\ + , .{ + global.key.fmt(self), + global.value.linkage, + global.value.preemption, + global.value.visibility, + global.value.dll_storage_class, + object.thread_local, + global.value.unnamed_addr, + global.value.addr_space, + global.value.externally_initialized, + @tagName(object.mutability), + global.value.type.fmt(self), + global.value.alignment, + }); + } + try writer.writeByte('\n'); + for (self.functions.items) |function| { + const global = self.globals.entries.get(@intFromEnum(function.global)); + const item = self.type_items.items[@intFromEnum(global.value.type)]; + const extra = self.typeExtraDataTrail(Type.Function, item.data); + const params: []const Type = + @ptrCast(self.type_extra.items[extra.end..][0..extra.data.params_len]); + try writer.print( + \\{s} {}{}{}{}{} @{}( + , .{ + if (function.body) |_| "define" else "declare", + global.value.linkage, + global.value.preemption, + global.value.visibility, + global.value.dll_storage_class, + extra.data.ret.fmt(self), + global.key.fmt(self), + }); + for (params, 0..) |param, index| { + if (index > 0) try writer.writeAll(", "); + try writer.print("{%} %{d}", .{ param.fmt(self), index }); + } + switch (item.tag) { + .function => {}, + .vararg_function => { + if (params.len > 0) try writer.writeAll(", "); + try writer.writeAll("..."); + }, + else => unreachable, + } + try writer.print(") {}{}", .{ + global.value.unnamed_addr, + global.value.alignment, + }); + if (function.body) |_| try writer.print( + \\{{ + \\ ret {%} + \\}} + \\ + , .{ + extra.data.ret.fmt(self), + }); + try writer.writeByte('\n'); + } +} + fn ensureUnusedCapacityGlobal(self: *Builder, name: String) Allocator.Error!void { if (self.useLibLlvm()) try self.llvm_globals.ensureUnusedCapacity(self.gpa, 1); try self.string_map.ensureUnusedCapacity(self.gpa, 1); @@ -1002,6 +1435,7 @@ fn fnTypeAssumeCapacity( } fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { + assert(bits > 0); const result = self.typeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); if (self.useLibLlvm() and result.new) self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits)); @@ -1162,10 +1596,16 @@ fn structTypeAssumeCapacity( }); self.type_extra.appendSliceAssumeCapacity(@ptrCast(fields)); if (self.useLibLlvm()) { - const llvm_fields = try self.gpa.alloc(*llvm.Type, fields.len); - defer self.gpa.free(llvm_fields); + const ExpectedContents = [32]*llvm.Type; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_fields = try allocator.alloc(*llvm.Type, fields.len); + defer allocator.free(llvm_fields); for (llvm_fields, fields) |*llvm_field, field| llvm_field.* = self.llvm_types.items[@intFromEnum(field)]; + self.llvm_types.appendAssumeCapacity(self.llvm_context.structType( llvm_fields.ptr, @intCast(llvm_fields.len), @@ -1277,90 +1717,114 @@ fn isValidIdentifier(id: []const u8) bool { return true; } -pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { - if (self.source_filename != .none) try writer.print( - \\; ModuleID = '{s}' - \\source_filename = {"} - \\ - , .{ self.source_filename.toSlice(self).?, self.source_filename.fmt(self) }); - if (self.data_layout != .none) try writer.print( - \\target datalayout = {"} - \\ - , .{self.data_layout.fmt(self)}); - if (self.target_triple != .none) try writer.print( - \\target triple = {"} - \\ - , 
.{self.target_triple.fmt(self)}); - try writer.writeByte('\n'); - for (self.types.keys(), self.types.values()) |id, ty| try writer.print( - \\%{} = type {} - \\ - , .{ id.fmt(self), ty.fmt(self) }); - try writer.writeByte('\n'); - for (self.objects.items) |object| { - const global = self.globals.entries.get(@intFromEnum(object.global)); - try writer.print( - \\@{} ={}{}{}{}{}{}{}{} {s} {%}{,} - \\ - , .{ - global.key.fmt(self), - global.value.linkage, - global.value.preemption, - global.value.visibility, - global.value.dll_storage_class, - object.thread_local, - global.value.unnamed_addr, - global.value.addr_space, - global.value.externally_initialized, - @tagName(object.mutability), - global.value.type.fmt(self), - global.value.alignment, - }); - } - try writer.writeByte('\n'); - for (self.functions.items) |function| { - const global = self.globals.entries.get(@intFromEnum(function.global)); - const item = self.type_items.items[@intFromEnum(global.value.type)]; - const extra = self.typeExtraDataTrail(Type.Function, item.data); - const params: []const Type = - @ptrCast(self.type_extra.items[extra.end..][0..extra.data.params_len]); - try writer.print( - \\{s} {}{}{}{}{} @{}( - , .{ - if (function.body) |_| "define" else "declare", - global.value.linkage, - global.value.preemption, - global.value.visibility, - global.value.dll_storage_class, - extra.data.ret.fmt(self), - global.key.fmt(self), - }); - for (params, 0..) |param, index| { - if (index > 0) try writer.writeAll(", "); - try writer.print("{%} %{d}", .{ param.fmt(self), index }); +fn bigIntConstAssumeCapacity( + self: *Builder, + ty: Type, + value: std.math.big.int.Const, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + const type_item = self.type_items.items[@intFromEnum(ty)]; + assert(type_item.tag == .integer); + const bits = type_item.data; + + const ExpectedContents = extern struct { + limbs: [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb, + llvm_limbs: if (build_options.have_llvm) [64 / @sizeOf(u64)]u64 else void, + }; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + var limbs: []std.math.big.Limb = &.{}; + defer allocator.free(limbs); + const canonical_value = if (value.fitsInTwosComp(.signed, bits)) value else canon: { + assert(value.fitsInTwosComp(.unsigned, bits)); + limbs = try allocator.alloc(std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits)); + var temp_value = std.math.big.int.Mutable.init(limbs, 0); + temp_value.truncate(value, .signed, bits); + break :canon temp_value.toConst(); + }; + assert(canonical_value.fitsInTwosComp(.signed, bits)); + + const ExtraPtr = *align(@alignOf(std.math.big.Limb)) Constant.Integer; + const Key = struct { tag: Constant.Tag, type: Type, limbs: []const std.math.big.Limb }; + const tag: Constant.Tag = switch (canonical_value.positive) { + true => .integer_positive, + false => .integer_negative, + }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag))); + hasher.update(std.mem.asBytes(&key.type)); + hasher.update(std.mem.sliceAsBytes(key.limbs)); + return @truncate(hasher.final()); } - switch (item.tag) { - .function => {}, - .vararg_function => { - if (params.len > 0) try writer.writeAll(", "); - try writer.writeAll("..."); - }, - else => unreachable, + pub fn eql(ctx: @This(), lhs: Key, _: void, rhs_index: usize) bool { + if 
(lhs.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra: ExtraPtr = @ptrCast( + ctx.builder.constant_limbs.items[rhs_data..][0..Constant.Integer.limbs], + ); + const rhs_limbs = ctx.builder.constant_limbs + .items[rhs_data + Constant.Integer.limbs ..][0..rhs_extra.limbs_len]; + return lhs.type == rhs_extra.type and std.mem.eql(std.math.big.Limb, lhs.limbs, rhs_limbs); } - try writer.print(") {}{}", .{ - global.value.unnamed_addr, - global.value.alignment, - }); - if (function.body) |_| try writer.print( - \\{{ - \\ ret {%} - \\}} - \\ - , .{ - extra.data.ret.fmt(self), + }; + + const data = Key{ .tag = tag, .type = ty, .limbs = canonical_value.limbs }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = tag, + .data = @intCast(self.constant_limbs.items.len), }); - try writer.writeByte('\n'); + const extra: ExtraPtr = @ptrCast( + self.constant_limbs.addManyAsArrayAssumeCapacity(Constant.Integer.limbs), + ); + extra.* = .{ .type = ty, .limbs_len = @intCast(canonical_value.limbs.len) }; + self.constant_limbs.appendSliceAssumeCapacity(canonical_value.limbs); + if (self.useLibLlvm()) { + const llvm_type = ty.toLlvm(self); + if (canonical_value.to(c_longlong)) |small| { + self.llvm_constants.appendAssumeCapacity(llvm_type.constInt(@bitCast(small), .True)); + } else |_| if (canonical_value.to(c_ulonglong)) |small| { + self.llvm_constants.appendAssumeCapacity(llvm_type.constInt(small, .False)); + } else |_| { + const llvm_limbs = try allocator.alloc(u64, std.math.divCeil( + usize, + canonical_value.bitCountTwosComp(), + @bitSizeOf(u64), + ) catch unreachable); + defer allocator.free(llvm_limbs); + var limb_index: usize = 0; + var borrow: std.math.big.Limb = 0; + for (llvm_limbs) |*result_limb| { + var llvm_limb: u64 = 0; + inline for (0..Constant.Integer.limbs) |shift| { + const limb = if (limb_index < canonical_value.limbs.len) + canonical_value.limbs[limb_index] + else + 0; + limb_index += 1; + llvm_limb |= @as(u64, limb) << shift * @bitSizeOf(std.math.big.Limb); + } + if (!canonical_value.positive) { + const overflow = @subWithOverflow(borrow, llvm_limb); + llvm_limb = overflow[0]; + borrow -%= overflow[1]; + assert(borrow == 0 or borrow == std.math.maxInt(u64)); + } + result_limb.* = llvm_limb; + } + self.llvm_constants.appendAssumeCapacity( + llvm_type.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs.ptr), + ); + } + } } + return @enumFromInt(gop.index); } inline fn useLibLlvm(self: *const Builder) bool { diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 5d04ec930b..e5fa8ba265 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -280,6 +280,9 @@ pub const Value = opaque { pub const attachMetaData = ZigLLVMAttachMetaData; extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void; + + pub const dump = LLVMDumpValue; + extern fn LLVMDumpValue(Val: *Value) void; }; pub const Type = opaque { @@ -353,6 +356,9 @@ pub const Type = opaque { ConstantIndices: [*]const *Value, NumIndices: c_uint, ) *Value; + + pub const dump = LLVMDumpType; + extern fn LLVMDumpType(Ty: *Type) void; }; pub const Module = opaque { -- cgit v1.2.3 From ff8a49448c70ffe73826c7987522ed63fddd654f Mon Sep 17 00:00:00 2001 From: Jacob Young 
Date: Mon, 10 Jul 2023 10:52:17 -0400 Subject: llvm: finish converting `lowerValue` --- src/Module.zig | 12 +- src/Sema.zig | 8 +- src/codegen/llvm.zig | 1517 ++++++++++++++++-------------- src/codegen/llvm/Builder.zig | 2041 +++++++++++++++++++++++++++++++++++++---- src/codegen/llvm/bindings.zig | 75 +- 5 files changed, 2751 insertions(+), 902 deletions(-) (limited to 'src') diff --git a/src/Module.zig b/src/Module.zig index ea444d3cc4..41236880c5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -835,10 +835,6 @@ pub const Decl = struct { assert(decl.has_tv); return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod))); } - - pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void { - decl.val = (try decl.val.intern(decl.ty, mod)).toValue(); - } }; /// This state is attached to every Decl when Module emit_h is non-null. @@ -4204,7 +4200,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { try wip_captures.finalize(); for (comptime_mutable_decls.items) |decl_index| { const decl = mod.declPtr(decl_index); - try decl.intern(mod); + _ = try decl.internValue(mod); } new_decl.analysis = .complete; } else |err| switch (err) { @@ -4315,7 +4311,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { try wip_captures.finalize(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); - try ct_decl.intern(mod); + _ = try ct_decl.internValue(mod); } const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 }; const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 }; @@ -5362,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato try wip_captures.finalize(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); - try ct_decl.intern(mod); + _ = try ct_decl.internValue(mod); } // Copy the block into place and mark that as the main block. @@ -6369,7 +6365,7 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; - try decl.intern(mod); + _ = try decl.internValue(mod); // This is the first time we are marking this Decl alive. 
We must // therefore recurse into its value and mark any Decl it references diff --git a/src/Sema.zig b/src/Sema.zig index 3f8b936e0b..08d5f02a17 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3899,7 +3899,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try mod.declareDeclDependency(sema.owner_decl_index, decl_index); const decl = mod.declPtr(decl_index); - if (iac.is_const) try decl.intern(mod); + if (iac.is_const) _ = try decl.internValue(mod); const final_elem_ty = decl.ty; const final_ptr_ty = try mod.ptrType(.{ .child = final_elem_ty.toIntern(), @@ -33577,7 +33577,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi try wip_captures.finalize(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); - try ct_decl.intern(mod); + _ = try ct_decl.internValue(mod); } } else { if (fields_bit_sum > std.math.maxInt(u16)) { @@ -34645,7 +34645,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void try wip_captures.finalize(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); - try ct_decl.intern(mod); + _ = try ct_decl.internValue(mod); } struct_obj.have_field_inits = true; @@ -34744,7 +34744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try wip_captures.finalize(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); - try ct_decl.intern(mod); + _ = try ct_decl.internValue(mod); } try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b4bde977dd..0ac30e00e7 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -579,7 +579,7 @@ pub const Object = struct { /// The LLVM global table which holds the names corresponding to Zig errors. /// Note that the values are not added until flushModule, when all errors in /// the compilation are known. - error_name_table: ?*llvm.Value, + error_name_table: Builder.Variable.Index, /// This map is usually very close to empty. It tracks only the cases when a /// second extern Decl could not be emitted with the correct name due to a /// name collision. @@ -763,7 +763,7 @@ pub const Object = struct { .named_enum_map = .{}, .type_map = .{}, .di_type_map = .{}, - .error_name_table = null, + .error_name_table = .none, .extern_collisions = .{}, .null_opt_addr = null, }; @@ -803,51 +803,85 @@ pub const Object = struct { return slice.ptr; } - fn genErrorNameTable(o: *Object) !void { + fn genErrorNameTable(o: *Object) Allocator.Error!void { // If o.error_name_table is null, there was no instruction that actually referenced the error table. 
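 // Here "unreferenced" is represented by the `.none` variable index.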
- const error_name_table_ptr_global = o.error_name_table orelse return; + const error_name_table_ptr_global = o.error_name_table; + if (error_name_table_ptr_global == .none) return; const mod = o.module; + const error_name_list = mod.global_error_set.keys(); + const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); + defer mod.gpa.free(llvm_errors); + // TODO: Address space - const llvm_usize_ty = try o.lowerType(Type.usize); - const llvm_slice_ty = (try o.builder.structType(.normal, &.{ .ptr, llvm_usize_ty })).toLlvm(&o.builder); const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); + const llvm_usize_ty = try o.lowerType(Type.usize); + const llvm_slice_ty = try o.lowerType(slice_ty); + const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty); - const error_name_list = mod.global_error_set.keys(); - const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); - defer mod.gpa.free(llvm_errors); - - llvm_errors[0] = llvm_slice_ty.getUndef(); + llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty); for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { - const name = mod.intern_pool.stringToSlice(name_nts); - const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); - const str_global = o.llvm_module.addGlobal(str_init.typeOf(), ""); - str_global.setInitializer(str_init); + const name = try o.builder.string(mod.intern_pool.stringToSlice(name_nts)); + const str_init = try o.builder.stringNullConst(name); + const str_ty = str_init.typeOf(&o.builder); + const str_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); + str_global.setInitializer(str_init.toLlvm(&o.builder)); str_global.setLinkage(.Private); str_global.setGlobalConstant(.True); str_global.setUnnamedAddr(.True); str_global.setAlignment(1); - const slice_fields = [_]*llvm.Value{ - str_global, - (try o.builder.intConst(llvm_usize_ty, name.len)).toLlvm(&o.builder), + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = str_ty, + .alignment = comptime Builder.Alignment.fromByteUnits(1), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); - } + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = str_init, + }; + try o.builder.llvm_globals.append(o.gpa, str_global); + const str_global_index = try o.builder.addGlobal(.none, global); + try o.builder.variables.append(o.gpa, variable); - const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len))); + llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{ + str_global_index.toConst(), + try o.builder.intConst(llvm_usize_ty, name.toSlice(&o.builder).?.len), + }); + } - const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), ""); - error_name_table_global.setInitializer(error_name_table_init); + const error_name_table_init = try o.builder.arrayConst(llvm_table_ty, llvm_errors); + const error_name_table_global = o.llvm_module.addGlobal(llvm_table_ty.toLlvm(&o.builder), ""); + error_name_table_global.setInitializer(error_name_table_init.toLlvm(&o.builder)); error_name_table_global.setLinkage(.Private); error_name_table_global.setGlobalConstant(.True); error_name_table_global.setUnnamedAddr(.True); 
error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = llvm_table_ty, + .alignment = Builder.Alignment.fromByteUnits(slice_alignment), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = error_name_table_init, + }; + try o.builder.llvm_globals.append(o.gpa, error_name_table_global); + _ = try o.builder.addGlobal(.none, global); + try o.builder.variables.append(o.gpa, variable); + const error_name_table_ptr = error_name_table_global; - error_name_table_ptr_global.setInitializer(error_name_table_ptr); + error_name_table_ptr_global.ptr(&o.builder).init = variable.global.toConst(); + error_name_table_ptr_global.toLlvm(&o.builder).setInitializer(error_name_table_ptr); } fn genCmpLtErrorsLenFunction(object: *Object) !void { @@ -1116,9 +1150,9 @@ pub const Object = struct { .err_msg = null, }; - const function_index = try o.resolveLlvmFunction(decl_index); - const function = function_index.ptr(&o.builder); - const llvm_func = function.global.toLlvm(&o.builder); + const function = try o.resolveLlvmFunction(decl_index); + const global = function.ptrConst(&o.builder).global; + const llvm_func = global.toLlvm(&o.builder); if (func.analysis(ip).is_noinline) { o.addFnAttr(llvm_func, "noinline"); @@ -1155,8 +1189,10 @@ pub const Object = struct { o.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } - if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| + if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| { + global.ptr(&o.builder).section = try o.builder.string(section); llvm_func.setSection(section); + } // Remove all the basic blocks of a function in order to start over, generating // LLVM IR from an empty function body. @@ -1166,7 +1202,7 @@ pub const Object = struct { const builder = o.context.createBuilder(); - function.body = {}; + function.ptr(&o.builder).body = {}; const entry_block = o.context.appendBasicBlock(llvm_func, "Entry"); builder.positionBuilderAtEnd(entry_block); @@ -1487,8 +1523,8 @@ pub const Object = struct { const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. 
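// Note (illustrative, not from the patch): the recurring pattern throughout
// updateDeclExports below is double bookkeeping. Every property is recorded
// on the self-hosted Builder and also applied through the LLVM C API so the
// two views stay consistent, e.g. (schematically, quoting the pair used for
// internal linkage further down):
//
//     global.ptr(&self.builder).linkage = .internal; // Builder-side record
//     llvm_global.setLinkage(.Internal);             // LLVM-side effect
//
// presumably so the direct LLVM calls can be dropped once the Builder tracks
// everything itself.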
- const global_index = self.decl_map.get(decl_index) orelse return; - const llvm_global = global_index.toLlvm(&self.builder); + const global = self.decl_map.get(decl_index) orelse return; + const llvm_global = global.toLlvm(&self.builder); const decl = mod.declPtr(decl_index); if (decl.isExtern(mod)) { const decl_name = decl_name: { @@ -1511,18 +1547,17 @@ pub const Object = struct { } } - try global_index.rename(&self.builder, decl_name); - const decl_name_slice = decl_name.toSlice(&self.builder).?; - const global = global_index.ptr(&self.builder); - global.unnamed_addr = .default; + try global.rename(&self.builder, decl_name); + global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); - global.linkage = .external; + global.ptr(&self.builder).linkage = .external; llvm_global.setLinkage(.External); if (mod.wantDllExports()) { - global.dll_storage_class = .default; + global.ptr(&self.builder).dll_storage_class = .default; llvm_global.setDLLStorageClass(.Default); } if (self.di_map.get(decl)) |di_node| { + const decl_name_slice = decl_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); @@ -1533,21 +1568,31 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.val.getVariable(mod)) |variable| { - if (variable.is_threadlocal) { + if (decl.val.getVariable(mod)) |decl_var| { + if (decl_var.is_threadlocal) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.is_weak_linkage) { + if (decl_var.is_weak_linkage) { + global.ptr(&self.builder).linkage = .extern_weak; llvm_global.setLinkage(.ExternalWeak); } } + global.ptr(&self.builder).updateAttributes(); } else if (exports.len != 0) { const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name)); - try global_index.rename(&self.builder, exp_name); + try global.rename(&self.builder, exp_name); + global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); + if (mod.wantDllExports()) { + global.ptr(&self.builder).dll_storage_class = .dllexport; + llvm_global.setDLLStorageClass(.DLLExport); + } if (self.di_map.get(decl)) |di_node| { const exp_name_slice = exp_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { @@ -1562,23 +1607,45 @@ pub const Object = struct { } switch (exports[0].opts.linkage) { .Internal => unreachable, - .Strong => llvm_global.setLinkage(.External), - .Weak => llvm_global.setLinkage(.WeakODR), - .LinkOnce => llvm_global.setLinkage(.LinkOnceODR), + .Strong => { + global.ptr(&self.builder).linkage = .external; + llvm_global.setLinkage(.External); + }, + .Weak => { + global.ptr(&self.builder).linkage = .weak_odr; + llvm_global.setLinkage(.WeakODR); + }, + .LinkOnce => { + global.ptr(&self.builder).linkage = .linkonce_odr; + llvm_global.setLinkage(.LinkOnceODR); + }, } switch (exports[0].opts.visibility) { - .default => llvm_global.setVisibility(.Default), - .hidden => llvm_global.setVisibility(.Hidden), - .protected => llvm_global.setVisibility(.Protected), + .default => { + 
global.ptr(&self.builder).visibility = .default; + llvm_global.setVisibility(.Default); + }, + .hidden => { + global.ptr(&self.builder).visibility = .hidden; + llvm_global.setVisibility(.Hidden); + }, + .protected => { + global.ptr(&self.builder).visibility = .protected; + llvm_global.setVisibility(.Protected); + }, } if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| { + global.ptr(&self.builder).section = try self.builder.string(section); llvm_global.setSection(section); } - if (decl.val.getVariable(mod)) |variable| { - if (variable.is_threadlocal) { + if (decl.val.getVariable(mod)) |decl_var| { + if (decl_var.is_threadlocal) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } } + global.ptr(&self.builder).updateAttributes(); // If a Decl is exported more than one time (which is rare), // we add aliases for all but the first export. @@ -1602,18 +1669,28 @@ pub const Object = struct { } } else { const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod))); - try global_index.rename(&self.builder, fqn); + try global.rename(&self.builder, fqn); + global.ptr(&self.builder).linkage = .internal; llvm_global.setLinkage(.Internal); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) { + global.ptr(&self.builder).dll_storage_class = .default; + llvm_global.setDLLStorageClass(.Default); + } + global.ptr(&self.builder).unnamed_addr = .unnamed_addr; llvm_global.setUnnamedAddr(.True); - if (decl.val.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |decl_var| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.is_threadlocal and !single_threaded) { + if (decl_var.is_threadlocal and !single_threaded) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } } + global.ptr(&self.builder).updateAttributes(); } } @@ -2658,31 +2735,44 @@ pub const Object = struct { const mod = o.module; const target = mod.getTarget(); const ty = try mod.intern(.{ .opt_type = .usize_type }); - const null_opt_usize = try mod.intern(.{ .opt = .{ + + const llvm_init = try o.lowerValue(try mod.intern(.{ .opt = .{ .ty = ty, .val = .none, - } }); - - const llvm_init = try o.lowerValue(.{ - .ty = ty.toType(), - .val = null_opt_usize.toValue(), - }); + } })); + const llvm_ty = llvm_init.typeOf(&o.builder); const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const global = o.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(), + const llvm_alignment = ty.toType().abiAlignment(mod); + const llvm_global = o.llvm_module.addGlobalInAddressSpace( + llvm_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_actual_addrspace), ); - global.setLinkage(.Internal); - global.setUnnamedAddr(.True); - global.setAlignment(ty.toType().abiAlignment(mod)); - global.setInitializer(llvm_init); + llvm_global.setLinkage(.Internal); + llvm_global.setUnnamedAddr(.True); + llvm_global.setAlignment(llvm_alignment); + llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); + + var global = Builder.Global{ + .linkage = .internal, + .unnamed_addr = 
.unnamed_addr, + .type = llvm_ty, + .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .init = llvm_init, + }; + try o.builder.llvm_globals.append(o.gpa, llvm_global); + _ = try o.builder.addGlobal(.none, global); + try o.builder.variables.append(o.gpa, variable); const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace) - global.constAddrSpaceCast(o.context.pointerType(@intFromEnum(llvm_wanted_addrspace))) + llvm_global.constAddrSpaceCast((try o.builder.ptrType(llvm_wanted_addrspace)).toLlvm(&o.builder)) else - global; + llvm_global; o.null_opt_addr = addrspace_casted_global; return addrspace_casted_global; @@ -2691,7 +2781,7 @@ pub const Object = struct { /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. - fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) !Builder.Function.Index { + fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Function.Index { const mod = o.module; const gpa = o.gpa; const decl = mod.declPtr(decl_index); @@ -2722,7 +2812,9 @@ pub const Object = struct { const is_extern = decl.isExtern(mod); if (!is_extern) { + global.linkage = .internal; llvm_fn.setLinkage(.Internal); + global.unnamed_addr = .unnamed_addr; llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { @@ -2767,7 +2859,8 @@ pub const Object = struct { } if (fn_info.alignment.toByteUnitsOptional()) |a| { - llvm_fn.setAlignment(@as(c_uint, @intCast(a))); + global.alignment = Builder.Alignment.fromByteUnits(a); + llvm_fn.setAlignment(@intCast(a)); } // Function attributes that are independent of analysis results of the function body. 
@@ -2864,9 +2957,9 @@ pub const Object = struct { } } - fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Error!Builder.Object.Index { + fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Variable.Index { const gop = try o.decl_map.getOrPut(o.gpa, decl_index); - if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.object; + if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); const mod = o.module; @@ -2880,9 +2973,9 @@ pub const Object = struct { var global = Builder.Global{ .addr_space = toLlvmGlobalAddressSpace(decl.@"addrspace", target), .type = try o.lowerType(decl.ty), - .kind = .{ .object = @enumFromInt(o.builder.objects.items.len) }, + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - var object = Builder.Object{ + var variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), }; @@ -2903,16 +2996,16 @@ pub const Object = struct { llvm_global.setUnnamedAddr(.False); global.linkage = .external; llvm_global.setLinkage(.External); - if (decl.val.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |decl_var| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.is_threadlocal and !single_threaded) { - object.thread_local = .generaldynamic; + if (decl_var.is_threadlocal and !single_threaded) { + variable.thread_local = .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { - object.thread_local = .default; + variable.thread_local = .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.is_weak_linkage) { + if (decl_var.is_weak_linkage) { global.linkage = .extern_weak; llvm_global.setLinkage(.ExternalWeak); } @@ -2926,17 +3019,8 @@ pub const Object = struct { try o.builder.llvm_globals.append(o.gpa, llvm_global); gop.value_ptr.* = try o.builder.addGlobal(name, global); - try o.builder.objects.append(o.gpa, object); - return global.kind.object; - } - - fn isUnnamedType(o: *Object, ty: Type, val: *llvm.Value) bool { - // Once `lowerType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `lowerType` fails here it means - // it is the first time lowering the type, which means the value can't possible - // have that type. 
- const llvm_ty = (o.lowerType(ty) catch return true).toLlvm(&o.builder); - return val.typeOf() != llvm_ty; + try o.builder.variables.append(o.gpa, variable); + return global.kind.variable; } fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { @@ -3069,14 +3153,17 @@ pub const Object = struct { => unreachable, else => switch (mod.intern_pool.indexToKey(t.toIntern())) { .int_type => |int_type| try o.builder.intType(int_type.bits), - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .One, .Many, .C => try o.builder.ptrType( + .ptr_type => |ptr_type| type: { + const ptr_ty = try o.builder.ptrType( toLlvmAddressSpace(ptr_type.flags.address_space, target), - ), - .Slice => try o.builder.structType(.normal, &.{ - .ptr, - try o.lowerType(Type.usize), - }), + ); + break :type switch (ptr_type.flags.size) { + .One, .Many, .C => ptr_ty, + .Slice => try o.builder.structType(.normal, &.{ + ptr_ty, + try o.lowerType(Type.usize), + }), + }; }, .array_type => |array_type| o.builder.arrayType( array_type.len + @intFromBool(array_type.sentinel != .none), @@ -3094,13 +3181,16 @@ pub const Object = struct { if (t.optionalReprIsPayload(mod)) return payload_ty; comptime assert(optional_layout_version == 3); - var fields_buf: [3]Builder.Type = .{ payload_ty, .i8, .none }; + var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined }; + var fields_len: usize = 2; const offset = child_ty.toType().abiSize(mod) + 1; const abi_size = t.abiSize(mod); - const padding = abi_size - offset; - if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); - fields_buf[2] = try o.builder.arrayType(padding, .i8); - return o.builder.structType(.normal, fields_buf[0..3]); + const padding_len = abi_size - offset; + if (padding_len > 0) { + fields[2] = try o.builder.arrayType(padding_len, .i8); + fields_len = 3; + } + return o.builder.structType(.normal, fields[0..fields_len]); }, .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), .error_union_type => |error_union_type| { @@ -3115,30 +3205,30 @@ pub const Object = struct { const payload_size = error_union_type.payload_type.toType().abiSize(mod); const error_size = Type.err_int.abiSize(mod); - var fields_buf: [3]Builder.Type = undefined; - if (error_align > payload_align) { - fields_buf[0] = error_type; - fields_buf[1] = payload_type; + var fields: [3]Builder.Type = undefined; + var fields_len: usize = 2; + const padding_len = if (error_align > payload_align) pad: { + fields[0] = error_type; + fields[1] = payload_type; const payload_end = std.mem.alignForward(u64, error_size, payload_align) + payload_size; const abi_size = std.mem.alignForward(u64, payload_end, error_align); - const padding = abi_size - payload_end; - if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); - fields_buf[2] = try o.builder.arrayType(padding, .i8); - return o.builder.structType(.normal, fields_buf[0..3]); - } else { - fields_buf[0] = payload_type; - fields_buf[1] = error_type; + break :pad abi_size - payload_end; + } else pad: { + fields[0] = payload_type; + fields[1] = error_type; const error_end = std.mem.alignForward(u64, payload_size, error_align) + error_size; const abi_size = std.mem.alignForward(u64, error_end, payload_align); - const padding = abi_size - error_end; - if (padding == 0) return o.builder.structType(.normal, fields_buf[0..2]); - fields_buf[2] = try o.builder.arrayType(padding, .i8); - return o.builder.structType(.normal, fields_buf[0..3]); + break :pad abi_size - error_end; + }; + if (padding_len > 0) { + 
fields[2] = try o.builder.arrayType(padding_len, .i8); + fields_len = 3; } + return o.builder.structType(.normal, fields[0..fields_len]); }, .simple_type => unreachable, .struct_type => |struct_type| { @@ -3371,6 +3461,7 @@ pub const Object = struct { fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { const mod = o.module; const ip = &mod.intern_pool; + const target = mod.getTarget(); const ret_ty = try lowerFnRetTy(o, fn_info); var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; @@ -3404,7 +3495,11 @@ pub const Object = struct { )); }, .slice => { - try llvm_params.appendSlice(o.gpa, &.{ .ptr, try o.lowerType(Type.usize) }); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + try llvm_params.appendSlice(o.gpa, &.{ + try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)), + try o.lowerType(Type.usize), + }); }, .multiple_llvm_types => { try llvm_params.appendSlice(o.gpa, it.types_buffer[0..it.types_len]); @@ -3433,20 +3528,23 @@ pub const Object = struct { ); } - fn lowerValue(o: *Object, arg_tv: TypedValue) Error!*llvm.Value { + fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant { const mod = o.module; - const gpa = o.gpa; const target = mod.getTarget(); - var tv = arg_tv; - switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { - .runtime_value => |rt| tv.val = rt.val.toValue(), + + var val = arg_val.toValue(); + const arg_val_key = mod.intern_pool.indexToKey(arg_val); + switch (arg_val_key) { + .runtime_value => |rt| val = rt.val.toValue(), else => {}, } - if (tv.val.isUndefDeep(mod)) { - return (try o.lowerType(tv.ty)).toLlvm(&o.builder).getUndef(); + if (val.isUndefDeep(mod)) { + return o.builder.undefConst(try o.lowerType(arg_val_key.typeOf().toType())); } - switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + const val_key = mod.intern_pool.indexToKey(val.toIntern()); + const ty = val_key.typeOf().toType(); + return switch (val_key) { .int_type, .ptr_type, .array_type, @@ -3474,8 +3572,8 @@ pub const Object = struct { .@"unreachable", .generic_poison, => unreachable, // non-runtime values - .false => return Builder.Constant.false.toLlvm(&o.builder), - .true => return Builder.Constant.true.toLlvm(&o.builder), + .false => .false, + .true => .true, }, .variable, .enum_literal, @@ -3486,259 +3584,266 @@ pub const Object = struct { const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); const function_index = try o.resolveLlvmFunction(fn_decl_index); - return function_index.toLlvm(&o.builder); + return function_index.ptrConst(&o.builder).global.toConst(); }, .func => |func| { const fn_decl_index = func.owner_decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); const function_index = try o.resolveLlvmFunction(fn_decl_index); - return function_index.toLlvm(&o.builder); + return function_index.ptrConst(&o.builder).global.toConst(); }, .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - return lowerBigInt(o, tv.ty, bigint); + const bigint = val.toBigInt(&bigint_space, mod); + return lowerBigInt(o, ty, bigint); }, .err => |err| { const int = try mod.getErrorValue(err.name); const llvm_int = try o.builder.intConst(Builder.Type.err_int, int); - return llvm_int.toLlvm(&o.builder); + return llvm_int; }, .error_union => |error_union| { - const err_tv: TypedValue = switch (error_union.val) { - .err_name => |err_name| .{ - .ty = tv.ty.errorUnionSet(mod), - .val 
= (try mod.intern(.{ .err = .{ - .ty = tv.ty.errorUnionSet(mod).toIntern(), - .name = err_name, - } })).toValue(), - }, - .payload => .{ - .ty = Type.err_int, - .val = try mod.intValue(Type.err_int, 0), - }, + const err_val = switch (error_union.val) { + .err_name => |err_name| try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } }), + .payload => (try mod.intValue(Type.err_int, 0)).toIntern(), }; - const payload_type = tv.ty.errorUnionPayload(mod); + const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - return o.lowerValue(err_tv); + return o.lowerValue(err_val); } const payload_align = payload_type.abiAlignment(mod); - const error_align = err_tv.ty.abiAlignment(mod); - const llvm_error_value = try o.lowerValue(err_tv); - const llvm_payload_value = try o.lowerValue(.{ - .ty = payload_type, - .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), - .payload => |payload| payload, - }.toValue(), + const error_align = Type.err_int.abiAlignment(mod); + const llvm_error_value = try o.lowerValue(err_val); + const llvm_payload_value = try o.lowerValue(switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .payload => |payload| payload, }); - var fields_buf: [3]*llvm.Value = undefined; - - const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - const llvm_field_count = llvm_ty.countStructElementTypes(); - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); - } + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; if (error_align > payload_align) { - fields_buf[0] = llvm_error_value; - fields_buf[1] = llvm_payload_value; - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + vals[0] = llvm_error_value; + vals[1] = llvm_payload_value; } else { - fields_buf[0] = llvm_payload_value; - fields_buf[1] = llvm_error_value; - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + vals[0] = llvm_payload_value; + vals[1] = llvm_error_value; + } + fields[0] = vals[0].typeOf(&o.builder); + fields[1] = vals[1].typeOf(&o.builder); + + const llvm_ty = try o.lowerType(ty); + const llvm_ty_fields = llvm_ty.structFields(&o.builder); + if (llvm_ty_fields.len > 2) { + assert(llvm_ty_fields.len == 3); + fields[2] = llvm_ty_fields[2]; + vals[2] = try o.builder.undefConst(fields[2]); } + return o.builder.structConst(try o.builder.structType( + llvm_ty.structKind(&o.builder), + fields[0..llvm_ty_fields.len], + ), vals[0..llvm_ty_fields.len]); }, - .enum_tag => |enum_tag| return o.lowerValue(.{ - .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), - .val = enum_tag.int.toValue(), - }), - .float => return switch (tv.ty.floatBits(target)) { - 16 => int: { - const repr: i16 = @bitCast(tv.val.toFloat(f16, mod)); - break :int try o.builder.intConst(.i16, repr); - }, - 32 => int: { - const repr: i32 = @bitCast(tv.val.toFloat(f32, mod)); - break :int try o.builder.intConst(.i32, repr); - }, - 64 => int: { - const repr: i64 = @bitCast(tv.val.toFloat(f64, mod)); - break :int try o.builder.intConst(.i64, repr); - }, - 80 => int: { - const repr: i80 = @bitCast(tv.val.toFloat(f80, mod)); - break :int try o.builder.intConst(.i80, repr); - }, - 128 => int: { - const repr: i128 = @bitCast(tv.val.toFloat(f128, mod)); - break :int try o.builder.intConst(.i128, repr); - 
}, + .enum_tag => |enum_tag| o.lowerValue(enum_tag.int), + .float => switch (ty.floatBits(target)) { + 16 => if (backendSupportsF16(target)) + try o.builder.halfConst(val.toFloat(f16, mod)) + else + try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))), + 32 => try o.builder.floatConst(val.toFloat(f32, mod)), + 64 => try o.builder.doubleConst(val.toFloat(f64, mod)), + 80 => if (backendSupportsF80(target)) + try o.builder.x86_fp80Const(val.toFloat(f80, mod)) + else + try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))), + 128 => try o.builder.fp128Const(val.toFloat(f128, mod)), else => unreachable, - }.toLlvm(&o.builder).constBitCast((try o.lowerType(tv.ty)).toLlvm(&o.builder)), + }, .ptr => |ptr| { - const ptr_tv: TypedValue = switch (ptr.len) { - .none => tv, - else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + const ptr_ty = switch (ptr.len) { + .none => ty, + else => ty.slicePtrFieldType(mod), }; - const llvm_ptr_val = switch (ptr.addr) { - .decl => |decl| try o.lowerDeclRefValue(ptr_tv, decl), - .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| try o.lowerIntAsPtr(int.toValue()), + const ptr_val = switch (ptr.addr) { + .decl => |decl| try o.lowerDeclRefValue(ptr_ty, decl), + .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_ty, mut_decl.decl), + .int => |int| try o.lowerIntAsPtr(int), .eu_payload, .opt_payload, .elem, .field, - => try o.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0), + => try o.lowerParentPtr(val, ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0), .comptime_field => unreachable, }; switch (ptr.len) { - .none => return llvm_ptr_val, - else => { - const fields: [2]*llvm.Value = .{ - llvm_ptr_val, - try o.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), - }; - return o.context.constStruct(&fields, fields.len, .False); - }, + .none => return ptr_val, + else => return o.builder.structConst(try o.lowerType(ty), &.{ + ptr_val, try o.lowerValue(ptr.len), + }), } }, .opt => |opt| { comptime assert(optional_layout_version == 3); - const payload_ty = tv.ty.optionalChild(mod); + const payload_ty = ty.optionalChild(mod); - const non_null_bit = (try o.builder.intConst(.i8, @intFromBool(opt.val != .none))).toLlvm(&o.builder); + const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none)); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } - const llvm_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { - .none => llvm_ty.constNull(), - else => |payload| o.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), + const llvm_ty = try o.lowerType(ty); + if (ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => switch (llvm_ty.tag(&o.builder)) { + .integer => try o.builder.intConst(llvm_ty, 0), + .pointer => try o.builder.nullConst(llvm_ty), + .structure => try o.builder.zeroInitConst(llvm_ty), + else => unreachable, + }, + else => |payload| try o.lowerValue(payload), }; assert(payload_ty.zigTypeTag(mod) != .Fn); - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try o.lowerValue(.{ - .ty = payload_ty, - .val = switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), - else => |payload| payload, - }.toValue(), + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; + vals[0] = 
try o.lowerValue(switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => |payload| payload, }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + vals[1] = non_null_bit; + fields[0] = vals[0].typeOf(&o.builder); + fields[1] = vals[1].typeOf(&o.builder); + + const llvm_ty_fields = llvm_ty.structFields(&o.builder); + if (llvm_ty_fields.len > 2) { + assert(llvm_ty_fields.len == 3); + fields[2] = llvm_ty_fields[2]; + vals[2] = try o.builder.undefConst(fields[2]); } - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + return o.builder.structConst(try o.builder.structType( + llvm_ty.structKind(&o.builder), + fields[0..llvm_ty_fields.len], + ), vals[0..llvm_ty_fields.len]); }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { - .array_type => switch (aggregate.storage) { - .bytes => |bytes| return o.context.constString( - bytes.ptr, - @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ), - .elems => |elem_vals| { - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); - defer gpa.free(llvm_elems); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .array_type => |array_type| switch (aggregate.storage) { + .bytes => |bytes| try o.builder.stringConst(try o.builder.string(bytes)), + .elems => |elems| { + const array_ty = try o.lowerType(ty); + const elem_ty = array_ty.childType(&o.builder); + assert(elems.len == array_ty.aggregateLen(&o.builder)); + + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, elems.len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, elems.len); + defer allocator.free(fields); + var need_unnamed = false; - for (elem_vals, 0..) 
|elem_val, i| { - llvm_elems[i] = try o.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return o.context.constStruct( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - .True, - ); - } else { - const llvm_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); + for (vals, fields, elems) |*result_val, *result_field, elem| { + result_val.* = try o.lowerValue(elem); + result_field.* = result_val.typeOf(&o.builder); + if (result_field.* != elem_ty) need_unnamed = true; } + return if (need_unnamed) try o.builder.structConst( + try o.builder.structType(.normal, fields), + vals, + ) else try o.builder.arrayConst(array_ty, vals); }, - .repeated_elem => |val| { - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @as(usize, @intCast(tv.ty.arrayLen(mod))); - const len_including_sent = len + @intFromBool(sentinel != null); - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + .repeated_elem => |elem| { + const len: usize = @intCast(array_type.len); + const len_including_sentinel: usize = + @intCast(len + @intFromBool(array_type.sentinel != .none)); + const array_ty = try o.lowerType(ty); + const elem_ty = array_ty.childType(&o.builder); + + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, len_including_sentinel); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, len_including_sentinel); + defer allocator.free(fields); var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try o.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); - } - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[0]); - } - - if (sentinel) |sent| { - llvm_elems[len] = try o.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[len]); + @memset(vals[0..len], try o.lowerValue(elem)); + @memset(fields[0..len], vals[0].typeOf(&o.builder)); + if (fields[0] != elem_ty) need_unnamed = true; + + if (array_type.sentinel != .none) { + vals[len] = try o.lowerValue(array_type.sentinel); + fields[len] = vals[len].typeOf(&o.builder); + if (fields[len] != elem_ty) need_unnamed = true; } - if (need_unnamed) { - return o.context.constStruct( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - .True, - ); - } else { - const llvm_elem_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); - } + return if (need_unnamed) try o.builder.structConst( + try o.builder.structType(.@"packed", fields), + vals, + ) else try o.builder.arrayConst(array_ty, vals); }, }, .vector_type => |vector_type| { - const elem_ty = vector_type.child.toType(); - const llvm_elems = try gpa.alloc(*llvm.Value, vector_type.len); - defer gpa.free(llvm_elems); - for (llvm_elems, 0..) 
|*llvm_elem, i| { - llvm_elem.* = switch (aggregate.storage) { - .bytes => |bytes| (try o.builder.intConst(.i8, bytes[i])).toLlvm(&o.builder), - .elems => |elems| try o.lowerValue(.{ - .ty = elem_ty, - .val = elems[i].toValue(), - }), - .repeated_elem => |elem| try o.lowerValue(.{ - .ty = elem_ty, - .val = elem.toValue(), - }), - }; + const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, vector_type.len); + defer allocator.free(vals); + + switch (aggregate.storage) { + .bytes => |bytes| for (vals, bytes) |*result_val, byte| { + result_val.* = try o.builder.intConst(.i8, byte); + }, + .elems => |elems| for (vals, elems) |*result_val, elem| { + result_val.* = try o.lowerValue(elem); + }, + .repeated_elem => |elem| @memset(vals, try o.lowerValue(elem)), } - return llvm.constVector( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); + return o.builder.vectorConst(try o.lowerType(ty), vals); }, .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + const struct_ty = try o.lowerType(ty); + const llvm_len = struct_ty.aggregateLen(&o.builder); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, llvm_len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, llvm_len); + defer allocator.free(fields); comptime assert(struct_layout_version == 2); + var llvm_index: usize = 0; var offset: u64 = 0; var big_align: u32 = 0; var need_unnamed = false; - - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + for (tuple.types, tuple.values, 0..) |field_ty, field_val, field_index| { if (field_val != .none) continue; if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; @@ -3749,20 +3854,20 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
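// Sketch (illustrative, not from the patch): how the padding members
// interleaved into the constant struct below are sized. The helper name is
// hypothetical; it mirrors the inline offset arithmetic in lowerValue:
const std = @import("std");
fn paddingLen(prev_offset: u64, field_align: u64) u64 {
    const aligned = std.mem.alignForward(u64, prev_offset, field_align);
    return aligned - prev_offset; // becomes an undef [N x i8] member when > 0
}
// e.g. a two-field aggregate holding a u8 followed by a u32 gets an undef
// [3 x i8] member between them, since the u32 needs 4-byte alignment.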
- llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } - const field_llvm_val = try o.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); - - need_unnamed = need_unnamed or o.isUnnamedType(field_ty.toType(), field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); + vals[llvm_index] = + try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); + if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) + need_unnamed = true; + llvm_index += 1; offset += field_ty.toType().abiSize(mod); } @@ -3771,73 +3876,71 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); - llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } } + assert(llvm_index == llvm_len); - if (need_unnamed) { - return o.context.constStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - .False, - ); - } else { - const llvm_struct_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - ); - } + return try o.builder.structConst(if (need_unnamed) + try o.builder.structType(struct_ty.structKind(&o.builder), fields) + else + struct_ty, vals); }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const llvm_struct_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - + assert(struct_obj.haveLayout()); + const struct_ty = try o.lowerType(ty); if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); - const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int = (try o.builder.intConst(int_llvm_ty, 0)).toLlvm(&o.builder); + var running_int = try o.builder.intConst(struct_ty, 0); var running_bits: u16 = 0; - for (fields, 0..) |field, i| { + for (struct_obj.fields.values(), 0..) |field, field_index| { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const non_int_val = try o.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, i), - }); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = (try o.builder.intConst(int_llvm_ty, running_bits)).toLlvm(&o.builder); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. 
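// Sketch (illustrative, not from the patch): the shift/or arithmetic this
// packed-struct loop performs, shown on a hypothetical packed struct
// { a: u3, b: u5 } whose backing integer is u8:
fn packExample(a: u3, b: u5) u8 {
    var running: u8 = 0;
    var running_bits: u3 = 0;
    running |= @as(u8, a) << running_bits; // field a occupies bits [0, 3)
    running_bits += 3;
    running |= @as(u8, b) << running_bits; // field b occupies bits [3, 8)
    return running;
}
// Pointer-typed fields are first ptrtoint'd to an integer of their exact bit
// size, everything else is bitcast, then zero-extended into the backing
// integer before the shift and or.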
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty.toLlvm(&o.builder)); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); + const non_int_val = + try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod)); + const small_int_ty = try o.builder.intType(ty_bit_size); + const small_int_val = try o.builder.castConst( + if (field.ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast, + non_int_val, + small_int_ty, + ); + const shift_rhs = try o.builder.intConst(struct_ty, running_bits); + const extended_int_val = + try o.builder.convConst(.unsigned, small_int_val, struct_ty); + const shifted = try o.builder.binConst(.shl, extended_int_val, shift_rhs); + running_int = try o.builder.binConst(.@"or", running_int, shifted); running_bits += ty_bit_size; } return running_int; } + const llvm_len = struct_ty.aggregateLen(&o.builder); - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, llvm_len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, llvm_len); + defer allocator.free(fields); comptime assert(struct_layout_version == 2); + var llvm_index: usize = 0; var offset: u64 = 0; var big_align: u32 = 0; var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { + var field_it = struct_obj.runtimeFieldIterator(mod); + while (field_it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); @@ -3846,20 +3949,22 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
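// Note (illustrative, not from the patch): the ExpectedContents/stackFallback
// pattern used repeatedly above keeps small aggregates off the heap. The
// allocator serves requests from a fixed stack buffer first and only falls
// back to o.gpa when the field count exceeds it, e.g. (minimal sketch with a
// hypothetical buffer size and length):
//
//     var stack = std.heap.stackFallback(256, gpa);
//     const allocator = stack.get();
//     const vals = try allocator.alloc(Builder.Constant, n);
//     defer allocator.free(vals);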
- llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == + struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } - const field_llvm_val = try o.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, field_and_index.index), - }); - - need_unnamed = need_unnamed or o.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); + vals[llvm_index] = try o.lowerValue( + (try val.fieldValue(mod, field_and_index.index)).toIntern(), + ); + fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); + if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) + need_unnamed = true; + llvm_index += 1; offset += field.ty.abiSize(mod); } @@ -3868,135 +3973,118 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = try o.builder.arrayType(padding_len, .i8); - llvm_fields.appendAssumeCapacity(llvm_array_ty.toLlvm(&o.builder).getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } } + assert(llvm_index == llvm_len); - if (need_unnamed) { - return o.context.constStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - ); - } + return try o.builder.structConst(if (need_unnamed) + try o.builder.structType(struct_ty.structKind(&o.builder), fields) + else + struct_ty, vals); }, else => unreachable, }, - .un => { - const llvm_union_ty = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { - .none => tv.val.castTag(.@"union").?.data, - else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { - .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, - else => unreachable, - }, - }; - - const layout = tv.ty.unionGetLayout(mod); + .un => |un| { + const union_ty = try o.lowerType(ty); + const layout = ty.unionGetLayout(mod); + if (layout.payload_size == 0) return o.lowerValue(un.tag); - if (layout.payload_size == 0) { - return lowerValue(o, .{ - .ty = tv.ty.unionTagTypeSafety(mod).?, - .val = tag_and_val.tag, - }); - } - const union_obj = mod.typeToUnion(tv.ty).?; - const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, o.module).?; + const union_obj = mod.typeToUnion(ty).?; + const field_index = ty.unionTagFieldIndex(un.tag.toValue(), o.module).?; assert(union_obj.haveFieldTypes()); const field_ty = union_obj.fields.values()[field_index].ty; if (union_obj.layout == .Packed) { - if (!field_ty.hasRuntimeBits(mod)) - return llvm_union_ty.constNull(); - const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod))); - const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); - const small_int_val = if (field_ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - return small_int_val.constZExtOrBitCast(llvm_union_ty); + if 
(!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0); + const small_int_val = try o.builder.castConst( + if (field_ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast, + try o.lowerValue(un.val), + try o.builder.intType(@intCast(field_ty.bitSize(mod))), + ); + return o.builder.convConst(.unsigned, small_int_val, union_ty); } // Sometimes we must make an unnamed struct because LLVM does // not support bitcasting our payload struct to the true union payload type. // Instead we use an unnamed struct and every reference to the global // must pointer cast to the expected type before accessing the union. - var need_unnamed: bool = layout.most_aligned_field != field_index; + var need_unnamed = layout.most_aligned_field != field_index; const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p (try o.builder.arrayType(padding_len, .i8)).toLlvm(&o.builder).getUndef(); + const padding_len = layout.payload_size; + break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8)); } - const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - need_unnamed = need_unnamed or o.isUnnamedType(field_ty, field); + const payload = try o.lowerValue(un.val); + const payload_ty = payload.typeOf(&o.builder); + if (payload_ty != union_ty.structFields(&o.builder)[ + @intFromBool(layout.tag_align >= layout.payload_align) + ]) need_unnamed = true; const field_size = field_ty.abiSize(mod); - if (field_size == layout.payload_size) { - break :p field; - } - const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); - const fields: [2]*llvm.Value = .{ - field, (try o.builder.arrayType(padding_len, .i8)).toLlvm(&o.builder).getUndef(), - }; - break :p o.context.constStruct(&fields, fields.len, .True); + if (field_size == layout.payload_size) break :p payload; + const padding_len = layout.payload_size - field_size; + const padding_ty = try o.builder.arrayType(padding_len, .i8); + break :p try o.builder.structConst( + try o.builder.structType(.@"packed", &.{ payload_ty, padding_ty }), + &.{ payload, try o.builder.undefConst(padding_ty) }, + ); }; + const payload_ty = payload.typeOf(&o.builder); - if (layout.tag_size == 0) { - const fields: [1]*llvm.Value = .{payload}; - if (need_unnamed) { - return o.context.constStruct(&fields, fields.len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, fields.len); - } - } - const llvm_tag_value = try lowerValue(o, .{ - .ty = tv.ty.unionTagTypeSafety(mod).?, - .val = tag_and_val.tag, - }); - var fields: [3]*llvm.Value = undefined; - var fields_len: c_uint = 2; + if (layout.tag_size == 0) return o.builder.structConst(if (need_unnamed) + try o.builder.structType(union_ty.structKind(&o.builder), &.{payload_ty}) + else + union_ty, &.{payload}); + const tag = try o.lowerValue(un.tag); + const tag_ty = tag.typeOf(&o.builder); + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; + var len: usize = 2; if (layout.tag_align >= layout.payload_align) { - fields = .{ llvm_tag_value, payload, undefined }; + fields = .{ tag_ty, payload_ty, undefined }; + vals = .{ tag, payload, undefined }; } else { - fields = .{ payload, llvm_tag_value, undefined }; + fields = .{ payload_ty, tag_ty, undefined }; + vals = .{ payload, tag, undefined }; } if (layout.padding != 0) { - fields[2] = (try o.builder.arrayType(layout.padding, .i8)).toLlvm(&o.builder).getUndef(); - fields_len = 3; - } - if (need_unnamed) { 
- return o.context.constStruct(&fields, fields_len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, fields_len); + fields[2] = try o.builder.arrayType(layout.padding, .i8); + vals[2] = try o.builder.undefConst(fields[2]); + len = 3; } + return try o.builder.structConst(if (need_unnamed) + try o.builder.structType(union_ty.structKind(&o.builder), fields[0..len]) + else + union_ty, vals[0..len]); }, .memoized_call => unreachable, - } + }; } - fn lowerIntAsPtr(o: *Object, val: Value) Allocator.Error!*llvm.Value { + fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant { const mod = o.module; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => return o.context.pointerType(0).getUndef(), + switch (mod.intern_pool.indexToKey(val)) { + .undef => return o.builder.undefConst(.ptr), .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_space, mod); + const bigint = val.toValue().toBigInt(&bigint_space, mod); const llvm_int = try lowerBigInt(o, Type.usize, bigint); - return llvm_int.constIntToPtr(o.context.pointerType(0)); + return o.builder.castConst(.inttoptr, llvm_int, .ptr); }, else => unreachable, } } - fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) Allocator.Error!*llvm.Value { - return (try o.builder.bigIntConst(try o.builder.intType(ty.intInfo(o.module).bits), bigint)) - .toLlvm(&o.builder); + fn lowerBigInt( + o: *Object, + ty: Type, + bigint: std.math.big.int.Const, + ) Allocator.Error!Builder.Constant { + const mod = o.module; + return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint); } const ParentPtr = struct { @@ -4004,45 +4092,41 @@ pub const Object = struct { llvm_ptr: *llvm.Value, }; - fn lowerParentPtrDecl( - o: *Object, - ptr_val: Value, - decl_index: Module.Decl.Index, - ) Error!*llvm.Value { + fn lowerParentPtrDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant { const mod = o.module; const decl = mod.declPtr(decl_index); try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); - return try o.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); + return o.lowerDeclRefValue(ptr_ty, decl_index); } - fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { + fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant { const mod = o.module; return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { - .decl => |decl| o.lowerParentPtrDecl(ptr_val, decl), - .mut_decl => |mut_decl| o.lowerParentPtrDecl(ptr_val, mut_decl.decl), - .int => |int| o.lowerIntAsPtr(int.toValue()), + .decl => |decl| o.lowerParentPtrDecl(decl), + .mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl), + .int => |int| try o.lowerIntAsPtr(int), .eu_payload => |eu_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true); + const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true); const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); const payload_ty = eu_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // In this case, we represent pointer to error union the same as pointer // to the payload. 
- return parent_llvm_ptr; + return parent_ptr; } - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, payload_offset)).toLlvm(&o.builder), - }; - const eu_llvm_ty = (try o.lowerType(eu_ty)).toLlvm(&o.builder); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, &.{ + try o.builder.intConst(.i32, 0), + try o.builder.intConst(.i32, @as( + i32, + if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1, + )), + }); }, .opt_payload => |opt_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true); + const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true); const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); const payload_ty = opt_ty.optionalChild(mod); @@ -4051,96 +4135,87 @@ pub const Object = struct { { // In this case, we represent pointer to optional the same as pointer // to the payload. - return parent_llvm_ptr; + return parent_ptr; } - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - } ** 2; - const opt_llvm_ty = (try o.lowerType(opt_ty)).toLlvm(&o.builder); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, &(.{ + try o.builder.intConst(.i32, 0), + } ** 2)); }, .comptime_field => unreachable, .elem => |elem_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); - - const indices: [1]*llvm.Value = .{ - (try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index)).toLlvm(&o.builder), - }; + const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + + return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, &.{ + try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index), + }); }, .field => |field_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); + const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @as(u32, @intCast(field_ptr.index)); + const field_index: u32 = @intCast(field_ptr.index); switch (parent_ty.zigTypeTag(mod)) { .Union => { if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; + return parent_ptr; } const layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) { // In this case a pointer to the union and a pointer to any // (void) payload is the same. 
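// Sketch (illustrative, not from the patch): which member index the GEP below
// selects for a union field pointer, matching the layout used when lowering
// union values (tag first when it is at least as aligned as the payload,
// payload first otherwise). The helper name is hypothetical:
fn unionPayloadIndex(tag_size: u64, tag_align: u64, payload_align: u64) u1 {
    // zero-sized tag: the payload is member 0; otherwise the payload follows
    // the tag whenever the tag is at least as aligned as the payload
    return @intFromBool(tag_size > 0 and tag_align >= payload_align);
}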
- return parent_llvm_ptr; + return parent_ptr; } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @intFromBool(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, llvm_pl_index)).toLlvm(&o.builder), - }; - const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + + return o.builder.gepConst(.inbounds, try o.lowerType(parent_ty), parent_ptr, &.{ + try o.builder.intConst(.i32, 0), + try o.builder.intConst(.i32, @intFromBool( + layout.tag_size > 0 and layout.tag_align >= layout.payload_align, + )), + }); }, .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; + if (!byte_aligned) return parent_ptr; const llvm_usize = try o.lowerType(Type.usize); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize.toLlvm(&o.builder)); + const base_addr = + try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; for (parent_ty.structFields(mod).values()[0..field_index]) |field| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @as(usize, @intCast(field.ty.bitSize(mod))); + b += @intCast(field.ty.bitSize(mod)); } break :b b; }; - const byte_offset = (try o.builder.intConst(llvm_usize, prev_bits / 8)).toLlvm(&o.builder); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = o.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); + const byte_offset = try o.builder.intConst(llvm_usize, prev_bits / 8); + const field_addr = try o.builder.binConst(.add, base_addr, byte_offset); + return o.builder.castConst(.inttoptr, field_addr, .ptr); } - const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, llvm_field.index)).toLlvm(&o.builder), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const indices: [1]*llvm.Value = .{ - (try o.builder.intConst(.i32, @intFromBool(parent_ty.hasRuntimeBitsIgnoreComptime(mod)))).toLlvm(&o.builder), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } + return o.builder.gepConst( + .inbounds, + try o.lowerType(parent_ty), + parent_ptr, + if (llvmField(parent_ty, field_index, mod)) |llvm_field| &.{ + try o.builder.intConst(.i32, 0), + try o.builder.intConst(.i32, llvm_field.index), + } else &.{ + try o.builder.intConst(.i32, @intFromBool( + parent_ty.hasRuntimeBitsIgnoreComptime(mod), + )), + }, + ); }, .Pointer => { assert(parent_ty.isSlice(mod)); - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, field_index)).toLlvm(&o.builder), - }; - const parent_llvm_ty = (try o.lowerType(parent_ty)).toLlvm(&o.builder); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, try o.lowerType(parent_ty), parent_ptr, &.{ + try o.builder.intConst(.i32, 0), + try o.builder.intConst(.i32, field_index), + }); }, else => unreachable, } @@ -4148,11 +4223,7 @@ pub const Object = struct { }; } - fn lowerDeclRefValue( - o: *Object, - tv: 
TypedValue, - decl_index: Module.Decl.Index, - ) Error!*llvm.Value { + fn lowerDeclRefValue(o: *Object, ty: Type, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant { const mod = o.module; // In the case of something like: @@ -4163,69 +4234,63 @@ pub const Object = struct { const decl = mod.declPtr(decl_index); if (decl.val.getFunction(mod)) |func| { if (func.owner_decl != decl_index) { - return o.lowerDeclRefValue(tv, func.owner_decl); + return o.lowerDeclRefValue(ty, func.owner_decl); } } else if (decl.val.getExternFunc(mod)) |func| { if (func.decl != decl_index) { - return o.lowerDeclRefValue(tv, func.decl); + return o.lowerDeclRefValue(ty, func.decl); } } const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) - { - return o.lowerPtrToVoid(tv.ty); - } + return o.lowerPtrToVoid(ty); try mod.markDeclAlive(decl); - const llvm_decl_val = if (is_fn_body) - (try o.resolveLlvmFunction(decl_index)).toLlvm(&o.builder) + const llvm_global = if (is_fn_body) + (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global else - (try o.resolveGlobalDecl(decl_index)).toLlvm(&o.builder); + (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global; const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: { - const llvm_decl_wanted_ptr_ty = o.context.pointerType(@intFromEnum(llvm_wanted_addrspace)); - break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty); - } else llvm_decl_val; - - const llvm_type = (try o.lowerType(tv.ty)).toLlvm(&o.builder); - if (tv.ty.zigTypeTag(mod) == .Int) { - return llvm_val.constPtrToInt(llvm_type); - } else { - return llvm_val.constBitCast(llvm_type); - } + const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) try o.builder.castConst( + .addrspacecast, + llvm_global.toConst(), + try o.builder.ptrType(llvm_wanted_addrspace), + ) else llvm_global.toConst(); + + return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) { + .signed => .signed, + .unsigned => .unsigned, + } else .unneeded, llvm_val, try o.lowerType(ty)); } - fn lowerPtrToVoid(o: *Object, ptr_ty: Type) !*llvm.Value { + fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant { const mod = o.module; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = try o.lowerType(Type.usize); - const llvm_ptr_ty = (try o.lowerType(ptr_ty)).toLlvm(&o.builder); - if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| { - return (try o.builder.intConst(llvm_usize, alignment)).toLlvm(&o.builder).constIntToPtr(llvm_ptr_ty); - } - // Note that these 0xaa values are appropriate even in release-optimized builds - // because we need a well-defined value that is not null, and LLVM does not - // have an "undef_but_not_null" attribute. 
As an example, if this `alloc` AIR - // instruction is followed by a `wrap_optional`, it will return this value - // verbatim, and the result should test as non-null. - const target = mod.getTarget(); - const int = try o.builder.intConst(llvm_usize, @as(u64, switch (target.ptrBitWidth()) { + const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse + // Note that these 0xaa values are appropriate even in release-optimized builds + // because we need a well-defined value that is not null, and LLVM does not + // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR + // instruction is followed by a `wrap_optional`, it will return this value + // verbatim, and the result should test as non-null. + switch (mod.getTarget().ptrBitWidth()) { 16 => 0xaaaa, 32 => 0xaaaaaaaa, 64 => 0xaaaaaaaa_aaaaaaaa, else => unreachable, - })); - return int.toLlvm(&o.builder).constIntToPtr(llvm_ptr_ty); + }; + const llvm_usize = try o.lowerType(Type.usize); + const llvm_ptr_ty = try o.lowerType(ptr_ty); + return o.builder.castConst(.inttoptr, try o.builder.intConst(llvm_usize, int), llvm_ptr_ty); } fn addAttr(o: *Object, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void { @@ -4436,26 +4501,29 @@ pub const DeclGen = struct { _ = try o.resolveLlvmFunction(extern_func.decl); } else { const target = mod.getTarget(); - const object_index = try o.resolveGlobalDecl(decl_index); - const object = object_index.ptr(&o.builder); - const global = object.global.ptr(&o.builder); - var llvm_global = object.global.toLlvm(&o.builder); - global.alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); + const object = try o.resolveGlobalDecl(decl_index); + const global = object.ptrConst(&o.builder).global; + var llvm_global = global.toLlvm(&o.builder); + global.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); llvm_global.setAlignment(decl.getAlignment(mod)); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| llvm_global.setSection(s); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| { + global.ptr(&o.builder).section = try o.builder.string(section); + llvm_global.setSection(section); + } assert(decl.has_tv); - const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { - object.mutability = .global; - break :init_val variable.init; + const init_val = if (decl.val.getVariable(mod)) |decl_var| init_val: { + object.ptr(&o.builder).mutability = .global; + break :init_val decl_var.init; } else init_val: { - object.mutability = .constant; + object.ptr(&o.builder).mutability = .constant; llvm_global.setGlobalConstant(.True); break :init_val decl.val.toIntern(); }; if (init_val != .none) { - const llvm_init = try o.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() }); - if (llvm_global.globalGetValueType() == llvm_init.typeOf()) { - llvm_global.setInitializer(llvm_init); + const llvm_init = try o.lowerValue(init_val); + if (llvm_global.globalGetValueType() == llvm_init.typeOf(&o.builder).toLlvm(&o.builder)) { + object.ptr(&o.builder).init = llvm_init; + llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); } else { // LLVM does not allow us to change the type of globals. 
So we must // create a new global with the correct type, copy all its attributes, @@ -4472,20 +4540,21 @@ pub const DeclGen = struct { // Related: https://github.com/ziglang/zig/issues/13265 const llvm_global_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const new_global = o.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(), + llvm_init.typeOf(&o.builder).toLlvm(&o.builder), "", @intFromEnum(llvm_global_addrspace), ); new_global.setLinkage(llvm_global.getLinkage()); new_global.setUnnamedAddr(llvm_global.getUnnamedAddress()); new_global.setAlignment(llvm_global.getAlignment()); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| - new_global.setSection(s); - new_global.setInitializer(llvm_init); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| + new_global.setSection(section); + new_global.setInitializer(llvm_init.toLlvm(&o.builder)); // TODO: How should this work then the address space of a global changed? llvm_global.replaceAllUsesWith(new_global); new_global.takeName(llvm_global); - o.builder.llvm_globals.items[@intFromEnum(object.global)] = new_global; + o.builder.llvm_globals.items[@intFromEnum(object.ptrConst(&o.builder).global)] = + new_global; llvm_global.deleteGlobal(); llvm_global = new_global; } @@ -4601,24 +4670,45 @@ pub const FuncGen = struct { fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value { const o = self.dg.object; const mod = o.module; - const llvm_val = try o.lowerValue(tv); - if (!isByRef(tv.ty, mod)) return llvm_val; + const llvm_val = try o.lowerValue(tv.val.toIntern()); + if (!isByRef(tv.ty, mod)) return llvm_val.toLlvm(&o.builder); // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
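// [Editor's aside, not part of the patch] Throughout this commit, every global created
// via the LLVM C API is mirrored by Builder-side records so that IR can eventually be
// produced without libLLVM: `builder.llvm_globals.items[i]` corresponds to
// `builder.globals.values()[i]`, and the global's `kind` indexes into `variables` or
// `functions`. A condensed sketch of the registration performed by the code that follows
// (`exampleAddAnonConst` is a hypothetical helper, not code from the commit):
fn exampleAddAnonConst(o: *Object, llvm_global: *llvm.Value, ty: Builder.Type, init: Builder.Constant) Allocator.Error!void {
    const global = Builder.Global{
        .linkage = .private,
        .unnamed_addr = .unnamed_addr,
        .type = ty,
        .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
    };
    const variable = Builder.Variable{
        .global = @enumFromInt(o.builder.globals.count()),
        .mutability = .constant,
        .init = init,
    };
    // append in lockstep so indices stay aligned across the three tables
    try o.builder.llvm_globals.append(o.gpa, llvm_global);
    _ = try o.builder.addGlobal(.none, global);
    try o.builder.variables.append(o.gpa, variable);
}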
const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const global = o.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", @intFromEnum(llvm_actual_addrspace)); - global.setInitializer(llvm_val); - global.setLinkage(.Private); - global.setGlobalConstant(.True); - global.setUnnamedAddr(.True); - global.setAlignment(tv.ty.abiAlignment(mod)); + const llvm_ty = llvm_val.typeOf(&o.builder); + const llvm_alignment = tv.ty.abiAlignment(mod); + const llvm_global = o.llvm_module.addGlobalInAddressSpace(llvm_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_actual_addrspace)); + llvm_global.setInitializer(llvm_val.toLlvm(&o.builder)); + llvm_global.setLinkage(.Private); + llvm_global.setGlobalConstant(.True); + llvm_global.setUnnamedAddr(.True); + llvm_global.setAlignment(llvm_alignment); + + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = llvm_ty, + .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = llvm_val, + }; + try o.builder.llvm_globals.append(o.gpa, llvm_global); + _ = try o.builder.addGlobal(.none, global); + try o.builder.variables.append(o.gpa, variable); + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - global.constAddrSpaceCast(self.context.pointerType(@intFromEnum(llvm_wanted_addrspace))) + llvm_global.constAddrSpaceCast( + (try o.builder.ptrType(llvm_wanted_addrspace)).toLlvm(&o.builder), + ) else - global; + llvm_global; return addrspace_casted_ptr; } @@ -5197,10 +5287,7 @@ pub const FuncGen = struct { const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.ty.childType(mod).arrayLen(mod); - const msg_ptr = try o.lowerValue(.{ - .ty = msg_decl.ty, - .val = msg_decl.val, - }); + const msg_ptr = try o.lowerValue(try msg_decl.internValue(mod)); const null_opt_addr_global = try o.getNullOptAddr(); const target = mod.getTarget(); const llvm_usize = try o.lowerType(Type.usize); @@ -5212,9 +5299,9 @@ pub const FuncGen = struct { // ptr @2, ; addr (null ?usize) // ) const args = [4]*llvm.Value{ - msg_ptr, + msg_ptr.toLlvm(&o.builder), (try o.builder.intConst(llvm_usize, msg_len)).toLlvm(&o.builder), - fg.context.pointerType(0).constNull(), + (try o.builder.nullConst(.ptr)).toLlvm(&o.builder), null_opt_addr_global, }; const panic_func = mod.funcInfo(mod.panic_func_index); @@ -5672,8 +5759,8 @@ pub const FuncGen = struct { if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { - const err_set_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - const zero = err_set_ty.constNull(); + const err_set_ty = Builder.Type.err_int.toLlvm(&o.builder); + const zero = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); if (!payload_has_bits) { // TODO add alignment to this load const loaded = if (operand_is_ptr) @@ -6034,7 +6121,10 @@ pub const FuncGen = struct { const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { - const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ + (try 
o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + rhs, + }; if (isByRef(elem_ty, mod)) { const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); if (canElideLoad(self, body_tail)) @@ -6082,7 +6172,10 @@ pub const FuncGen = struct { // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { // If this is a single-item pointer to an array, we need another index in the GEP. - const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + rhs, + }; break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); } else ptr: { const indices: [1]*llvm.Value = .{rhs}; @@ -6105,7 +6198,8 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) + return (try o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -6116,7 +6210,10 @@ pub const FuncGen = struct { const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); if (ptr_ty.isSinglePointer(mod)) { // If this is a single-item pointer to an array, we need another index in the GEP. - const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), rhs }; + const indices: [2]*llvm.Value = .{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + rhs, + }; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); } else { const indices: [1]*llvm.Value = .{rhs}; @@ -6829,8 +6926,11 @@ pub const FuncGen = struct { operand; if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - const ptr_ty = (try o.lowerType(payload_ty.slicePtrFieldType(mod))).toLlvm(&o.builder); - return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); + const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace( + payload_ty.ptrAddressSpace(mod), + mod.getTarget(), + )); + return self.builder.buildICmp(pred, slice_ptr, (try o.builder.nullConst(ptr_ty)).toLlvm(&o.builder), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); } @@ -6867,8 +6967,7 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(mod); - const err_set_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - const zero = err_set_ty.constNull(); + const zero = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const val: Builder.Constant = switch (op) { @@ -6892,7 +6991,7 @@ pub const FuncGen = struct { if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); - const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, ""); + const loaded = self.builder.buildLoad(Builder.Type.err_int.toLlvm(&o.builder), err_field_ptr, ""); return 
self.builder.buildICmp(op, loaded, zero, ""); } @@ -7057,9 +7156,9 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); - const non_error_val = try o.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); + const non_error_val = try o.lowerValue((try mod.intValue(Type.err_int, 0)).toIntern()); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - _ = self.builder.buildStore(non_error_val, operand); + _ = self.builder.buildStore(non_error_val.toLlvm(&o.builder), operand); return operand; } const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); @@ -7067,7 +7166,7 @@ pub const FuncGen = struct { const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); - const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); + const store_inst = self.builder.buildStore(non_error_val.toLlvm(&o.builder), non_null_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); } // Then return the payload pointer (only if it is used). @@ -7146,7 +7245,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const ok_err_code = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder).constNull(); + const ok_err_code = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); const err_un_llvm_ty = (try o.lowerType(err_un_ty)).toLlvm(&o.builder); const payload_offset = errUnionPayloadOffset(payload_ty, mod); @@ -7606,7 +7705,10 @@ pub const FuncGen = struct { switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ Builder.Type.i32.toLlvm(&o.builder).constNull(), offset }; + const indices: [2]*llvm.Value = .{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + offset, + }; return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); }, .C, .Many => { @@ -7635,7 +7737,8 @@ pub const FuncGen = struct { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
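// [Editor's aside, not part of the patch] The repeated two-index pattern above exists
// because a single-item pointer to an array (`*[N]T`) points at the array object itself:
// LLVM needs a leading zero index to step through that pointer before the element index,
// while a many-item pointer indexes directly. The diff only swaps `constNull(i32)` for
// the equivalent Builder constant. Sketch of the single-item case (`exampleArrayGepIndices`
// is a hypothetical helper; `rhs` stands for the runtime element index, as in the diff):
fn exampleArrayGepIndices(o: *Object, rhs: *llvm.Value) Allocator.Error![2]*llvm.Value {
    return .{
        (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), // step through the *[N]T
        rhs, // then select the element
    };
}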
const indices: [2]*llvm.Value = .{ - Builder.Type.i32.toLlvm(&o.builder).constNull(), negative_offset, + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + negative_offset, }; return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); }, @@ -8448,7 +8551,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) - return o.lowerPtrToVoid(ptr_ty); + return (try o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); const alignment = ptr_ty.ptrAlignment(mod); @@ -8460,7 +8563,8 @@ pub const FuncGen = struct { const mod = o.module; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + return (try o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = (try o.lowerType(ret_ty)).toLlvm(&o.builder); return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); @@ -8566,18 +8670,19 @@ pub const FuncGen = struct { _ = inst; const o = self.dg.object; const mod = o.module; - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); + const llvm_usize = try o.lowerType(Type.usize); const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 - return llvm_usize.constNull(); + return (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder); } - const llvm_i32 = Builder.Type.i32.toLlvm(&o.builder); const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{}); - const params = [_]*llvm.Value{llvm_i32.constNull()}; + const params = [_]*llvm.Value{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + }; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); - return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); + return self.builder.buildPtrToInt(ptr_val, llvm_usize.toLlvm(&o.builder), ""); } fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -8589,7 +8694,9 @@ pub const FuncGen = struct { break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; - const params = [_]*llvm.Value{Builder.Type.i32.toLlvm(&o.builder).constNull()}; + const params = [_]*llvm.Value{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + }; const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); @@ -9060,10 +9167,9 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const llvm_i1 = Builder.Type.i1.toLlvm(&o.builder); const fn_val = try self.getIntrinsic(llvm_fn_name, &.{try o.lowerType(operand_ty)}); - const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; + const params = [_]*llvm.Value{ operand, Builder.Constant.false.toLlvm(&o.builder) }; const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.typeOfIndex(inst); const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); @@ -9170,11 +9276,9 @@ pub const FuncGen = struct { 
for (names) |name| { const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); - const this_tag_int_value = try o.lowerValue(.{ - .ty = Type.err_int, - .val = try mod.intValue(Type.err_int, err_int), - }); - switch_instr.addCase(this_tag_int_value, valid_block); + const this_tag_int_value = + try o.lowerValue((try mod.intValue(Type.err_int, err_int)).toIntern()); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), valid_block); } self.builder.positionBuilderAtEnd(valid_block); _ = self.builder.buildBr(end_block); @@ -9258,13 +9362,9 @@ pub const FuncGen = struct { for (enum_type.names, 0..) |_, field_index_usize| { const field_index = @as(u32, @intCast(field_index_usize)); - const this_tag_int_value = int: { - break :int try o.lowerValue(.{ - .ty = enum_ty, - .val = try mod.enumValueFieldIndex(enum_ty, field_index), - }); - }; - switch_instr.addCase(this_tag_int_value, named_block); + const this_tag_int_value = + try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern()); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), named_block); } self.builder.positionBuilderAtEnd(named_block); _ = self.builder.buildRet(Builder.Constant.true.toLlvm(&o.builder)); @@ -9371,11 +9471,9 @@ pub const FuncGen = struct { slice_global.setAlignment(slice_alignment); const return_block = self.context.appendBasicBlock(fn_val, "Name"); - const this_tag_int_value = try o.lowerValue(.{ - .ty = enum_ty, - .val = try mod.enumValueFieldIndex(enum_ty, field_index), - }); - switch_instr.addCase(this_tag_int_value, return_block); + const this_tag_int_value = + try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern()); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block); self.builder.positionBuilderAtEnd(return_block); const loaded = self.builder.buildLoad(llvm_ret_ty, slice_global, ""); @@ -9404,7 +9502,12 @@ pub const FuncGen = struct { const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal); const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type.toLlvm(&o.builder)); + llvm_fn.setLinkage(.Internal); + llvm_fn.setFunctionCallConv(.Fast); + o.addCommonFnAttributes(llvm_fn); + var global = Builder.Global{ + .linkage = .internal, .type = fn_type, .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; @@ -9412,10 +9515,6 @@ pub const FuncGen = struct { .global = @enumFromInt(o.builder.globals.count()), }; - llvm_fn.setLinkage(.Internal); - llvm_fn.setFunctionCallConv(.Fast); - o.addCommonFnAttributes(llvm_fn); - try o.builder.llvm_globals.append(self.gpa, llvm_fn); _ = try o.builder.addGlobal(try o.builder.string(lt_errors_fn_name), global); try o.builder.functions.append(self.gpa, function); @@ -9431,7 +9530,7 @@ pub const FuncGen = struct { const error_name_table_ptr = try self.getErrorNameTable(); const ptr_slice_llvm_ty = self.context.pointerType(0); - const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, ""); + const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr.toLlvm(&o.builder), ""); const indices = [_]*llvm.Value{operand}; const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, ""); return self.builder.buildLoad(slice_llvm_ty, error_name_ptr, ""); @@ -9588,18 +9687,18 @@ pub const FuncGen = struct { .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildAddReduce(operand), .Float => if 
(intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); - const neutral_value = scalar_llvm_ty.constReal(-0.0); - return self.builder.buildFPAddReduce(neutral_value, operand); + const scalar_llvm_ty = try o.lowerType(scalar_ty); + const neutral_value = try o.builder.fpConst(scalar_llvm_ty, -0.0); + return self.builder.buildFPAddReduce(neutral_value.toLlvm(&o.builder), operand); }, else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildMulReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder); - const neutral_value = scalar_llvm_ty.constReal(1.0); - return self.builder.buildFPMulReduce(neutral_value, operand); + const scalar_llvm_ty = try o.lowerType(scalar_ty); + const neutral_value = try o.builder.fpConst(scalar_llvm_ty, 1.0); + return self.builder.buildFPMulReduce(neutral_value.toLlvm(&o.builder), operand); }, else => unreachable, }, @@ -9626,17 +9725,14 @@ pub const FuncGen = struct { const param_llvm_ty = try o.lowerType(scalar_ty); const libc_fn = try self.getLibcFunction(fn_name, &(.{param_llvm_ty} ** 2), param_llvm_ty); - const init_value = try o.lowerValue(.{ - .ty = scalar_ty, - .val = try mod.floatValue(scalar_ty, switch (reduce.operation) { - .Min => std.math.nan(f32), - .Max => std.math.nan(f32), - .Add => -0.0, - .Mul => 1.0, - else => unreachable, - }), - }); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); + const init_value = try o.lowerValue((try mod.floatValue(scalar_ty, switch (reduce.operation) { + .Min => std.math.nan(f32), + .Max => std.math.nan(f32), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + })).toIntern()); + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value.toLlvm(&o.builder)); } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -10030,26 +10126,42 @@ pub const FuncGen = struct { return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workgroup.id"); } - fn getErrorNameTable(self: *FuncGen) !*llvm.Value { + fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; - if (o.error_name_table) |table| { - return table; - } + const table = o.error_name_table; + if (table != .none) return table; const mod = o.module; const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); - const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space + const undef_init = try o.builder.undefConst(.ptr); // TODO: Address space - const error_name_table_global = o.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); - error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef()); + const name = try o.builder.string("__zig_err_name_table"); + const error_name_table_global = o.llvm_module.addGlobal(Builder.Type.ptr.toLlvm(&o.builder), name.toSlice(&o.builder).?); + error_name_table_global.setInitializer(undef_init.toLlvm(&o.builder)); error_name_table_global.setLinkage(.Private); error_name_table_global.setGlobalConstant(.True); error_name_table_global.setUnnamedAddr(.True); error_name_table_global.setAlignment(slice_alignment); - o.error_name_table = error_name_table_global; - return error_name_table_global; + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = .ptr, + .alignment = 
Builder.Alignment.fromByteUnits(slice_alignment), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = undef_init, + }; + try o.builder.llvm_globals.append(o.gpa, error_name_table_global); + _ = try o.builder.addGlobal(name, global); + try o.builder.variables.append(o.gpa, variable); + + o.error_name_table = global.kind.variable; + return global.kind.variable; } /// Assumes the optional is not pointer-like and payload has bits. @@ -10273,14 +10385,14 @@ pub const FuncGen = struct { return llvm_inst; } - const int_elem_ty = (try o.builder.intType(@intCast(info.packed_offset.host_size * 8))).toLlvm(&o.builder); - const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); + const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); + const containing_int = self.builder.buildLoad(containing_int_ty.toLlvm(&o.builder), ptr, ""); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); - const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); - const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); + const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); + const shifted_value = self.builder.buildLShr(containing_int, shift_amt.toLlvm(&o.builder), ""); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); if (isByRef(elem_ty, mod)) { @@ -10346,30 +10458,29 @@ pub const FuncGen = struct { } if (info.packed_offset.host_size != 0) { - const int_elem_ty = (try o.builder.intType(@intCast(info.packed_offset.host_size * 8))).toLlvm(&o.builder); - const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); + const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); + const containing_int = self.builder.buildLoad(containing_int_ty.toLlvm(&o.builder), ptr, ""); assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); - const containing_int_ty = containing_int.typeOf(); - const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False); + const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store - const value_bits_type = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); + const value_bits_type = try o.builder.intType(@intCast(elem_bits)); const value_bits = if (elem_ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(elem, value_bits_type, "") + self.builder.buildPtrToInt(elem, value_bits_type.toLlvm(&o.builder), "") else - self.builder.buildBitCast(elem, value_bits_type, ""); + self.builder.buildBitCast(elem, value_bits_type.toLlvm(&o.builder), ""); - var mask_val = value_bits_type.constAllOnes(); - mask_val = mask_val.constZExt(containing_int_ty); - mask_val = mask_val.constShl(shift_amt); + var mask_val = (try o.builder.intConst(value_bits_type, -1)).toLlvm(&o.builder); + mask_val = mask_val.constZExt(containing_int_ty.toLlvm(&o.builder)); + mask_val = mask_val.constShl(shift_amt.toLlvm(&o.builder)); mask_val = mask_val.constNot(); const anded_containing_int 
= self.builder.buildAnd(containing_int, mask_val, ""); - const extended_value = self.builder.buildZExt(value_bits, containing_int_ty, ""); - const shifted_value = self.builder.buildShl(extended_value, shift_amt, ""); + const extended_value = self.builder.buildZExt(value_bits, containing_int_ty.toLlvm(&o.builder), ""); + const shifted_value = self.builder.buildShl(extended_value, shift_amt.toLlvm(&o.builder), ""); const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, ""); const store_inst = self.builder.buildStore(ored_value, ptr); diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 95af18e726..1381e5d9d0 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -27,7 +27,7 @@ globals: std.AutoArrayHashMapUnmanaged(String, Global) = .{}, next_unnamed_global: String = @enumFromInt(0), next_unique_global_id: std.AutoHashMapUnmanaged(String, u32) = .{}, aliases: std.ArrayListUnmanaged(Alias) = .{}, -objects: std.ArrayListUnmanaged(Object) = .{}, +variables: std.ArrayListUnmanaged(Variable) = .{}, functions: std.ArrayListUnmanaged(Function) = .{}, constant_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, @@ -35,10 +35,12 @@ constant_items: std.MultiArrayList(Constant.Item) = .{}, constant_extra: std.ArrayListUnmanaged(u32) = .{}, constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb) = .{}, +pub const expected_fields_len = 32; +pub const expected_gep_indices_len = 8; + pub const String = enum(u32) { none = std.math.maxInt(u31), empty, - debugme, _, pub fn toSlice(self: String, b: *const Builder) ?[:0]const u8 { @@ -58,22 +60,23 @@ pub const String = enum(u32) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { + if (comptime std.mem.indexOfNone(u8, fmt_str, "@\"")) |_| + @compileError("invalid format string: '" ++ fmt_str ++ "'"); assert(data.string != .none); const slice = data.string.toSlice(data.builder) orelse return writer.print("{d}", .{@intFromEnum(data.string)}); - const need_quotes = if (comptime std.mem.eql(u8, fmt_str, "")) - !isValidIdentifier(slice) - else if (comptime std.mem.eql(u8, fmt_str, "\"")) - true - else - @compileError("invalid format string: '" ++ fmt_str ++ "'"); - if (need_quotes) try writer.writeByte('\"'); - for (slice) |character| switch (character) { + const full_slice = slice[0 .. 
slice.len + comptime @intFromBool( + std.mem.indexOfScalar(u8, fmt_str, '@') != null, + )]; + const need_quotes = (comptime std.mem.indexOfScalar(u8, fmt_str, '"') != null) or + !isValidIdentifier(full_slice); + if (need_quotes) try writer.writeByte('"'); + for (full_slice) |character| switch (character) { '\\' => try writer.writeAll("\\\\"), ' '...'"' - 1, '"' + 1...'\\' - 1, '\\' + 1...'~' => try writer.writeByte(character), else => try writer.print("\\{X:0>2}", .{character}), }; - if (need_quotes) try writer.writeByte('\"'); + if (need_quotes) try writer.writeByte('"'); } pub fn fmt(self: String, builder: *const Builder) std.fmt.Formatter(format) { return .{ .data = .{ .string = self, .builder = builder } }; @@ -92,8 +95,8 @@ pub const String = enum(u32) { pub fn hash(_: Adapter, key: []const u8) u32 { return @truncate(std.hash.Wyhash.hash(0, key)); } - pub fn eql(ctx: Adapter, lhs: []const u8, _: void, rhs_index: usize) bool { - return std.mem.eql(u8, lhs, String.fromIndex(rhs_index).toSlice(ctx.builder).?); + pub fn eql(ctx: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool { + return std.mem.eql(u8, lhs_key, String.fromIndex(rhs_index).toSlice(ctx.builder).?); } }; }; @@ -204,9 +207,167 @@ pub const Type = enum(u32) { pub const Item = packed struct(u32) { tag: Tag, data: ExtraIndex, + + pub const ExtraIndex = u28; }; - pub const ExtraIndex = u28; + pub fn tag(self: Type, builder: *const Builder) Tag { + return builder.type_items.items[@intFromEnum(self)].tag; + } + + pub fn unnamedTag(self: Type, builder: *const Builder) Tag { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body + .unnamedTag(builder), + else => item.tag, + }; + } + + pub fn scalarTag(self: Type, builder: *const Builder) Tag { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .vector, .scalable_vector => builder.typeExtraData(Type.Vector, item.data) + .child.tag(builder), + else => item.tag, + }; + } + + pub fn isFn(self: Type, builder: *const Builder) bool { + return switch (self.tag(builder)) { + .function, .vararg_function => true, + else => false, + }; + } + + pub fn fnKind(self: Type, builder: *const Builder) Type.Function.Kind { + return switch (self.tag(builder)) { + .function => .normal, + .vararg_function => .vararg, + else => unreachable, + }; + } + + pub fn isVector(self: Type, builder: *const Builder) bool { + return switch (self.tag(builder)) { + .vector, .scalable_vector => true, + else => false, + }; + } + + pub fn vectorKind(self: Type, builder: *const Builder) Type.Vector.Kind { + return switch (self.tag(builder)) { + .vector => .normal, + .scalable_vector => .scalable, + else => unreachable, + }; + } + + pub fn isStruct(self: Type, builder: *const Builder) bool { + return switch (self.tag(builder)) { + .structure, .packed_structure, .named_structure => true, + else => false, + }; + } + + pub fn structKind(self: Type, builder: *const Builder) Type.Structure.Kind { + return switch (self.unnamedTag(builder)) { + .structure => .normal, + .packed_structure => .@"packed", + else => unreachable, + }; + } + + pub fn scalarBits(self: Type, builder: *const Builder) u24 { + return switch (self) { + .void, .label, .token, .metadata, .none, .x86_amx => unreachable, + .i1 => 1, + .i8 => 8, + .half, .bfloat, .i16 => 16, + .i29 => 29, + .float, .i32 => 32, + .double, .i64, .x86_mmx => 64, + .x86_fp80, .i80 => 80, + .fp128, .ppc_fp128, .i128 => 
128, + .ptr => @panic("TODO: query data layout"), + _ => { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .simple, + .function, + .vararg_function, + => unreachable, + .integer => @intCast(item.data), + .pointer => @panic("TODO: query data layout"), + .target => unreachable, + .vector, + .scalable_vector, + => builder.typeExtraData(Type.Vector, item.data).child.scalarBits(builder), + .small_array, + .array, + .structure, + .packed_structure, + .named_structure, + => unreachable, + }; + }, + }; + } + + pub fn childType(self: Type, builder: *const Builder) Type { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .vector, + .scalable_vector, + .small_array, + => builder.typeExtraData(Type.Vector, item.data).child, + .array => builder.typeExtraData(Type.Array, item.data).child, + .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body, + else => unreachable, + }; + } + + pub fn vectorLen(self: Type, builder: *const Builder) u32 { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .vector, + .scalable_vector, + => builder.typeExtraData(Type.Vector, item.data).len, + else => unreachable, + }; + } + + pub fn aggregateLen(self: Type, builder: *const Builder) u64 { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .vector, + .scalable_vector, + .small_array, + => builder.typeExtraData(Type.Vector, item.data).len, + .array => builder.typeExtraData(Type.Array, item.data).len(), + .structure, + .packed_structure, + => builder.typeExtraData(Type.Structure, item.data).fields_len, + .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body + .aggregateLen(builder), + else => unreachable, + }; + } + + pub fn structFields(self: Type, builder: *const Builder) []const Type { + const item = builder.type_items.items[@intFromEnum(self)]; + switch (item.tag) { + .structure, + .packed_structure, + => { + const extra = builder.typeExtraDataTrail(Type.Structure, item.data); + return @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + }, + .named_structure => return builder.typeExtraData(Type.NamedStructure, item.data).body + .structFields(builder), + else => unreachable, + } + } pub const FormatData = struct { type: Type, @@ -220,11 +381,11 @@ pub const Type = enum(u32) { ) @TypeOf(writer).Error!void { assert(data.type != .none); if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name); - const type_item = data.builder.type_items.items[@intFromEnum(data.type)]; - switch (type_item.tag) { + const item = data.builder.type_items.items[@intFromEnum(data.type)]; + switch (item.tag) { .simple => unreachable, .function, .vararg_function => { - const extra = data.builder.typeExtraDataTrail(Type.Function, type_item.data); + const extra = data.builder.typeExtraDataTrail(Type.Function, item.data); const params: []const Type = @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]); if (!comptime std.mem.eql(u8, fmt_str, ">")) @@ -235,7 +396,7 @@ pub const Type = enum(u32) { if (index > 0) try writer.writeAll(", "); try writer.print("{%}", .{param.fmt(data.builder)}); } - switch (type_item.tag) { + switch (item.tag) { .function => {}, .vararg_function => { if (params.len > 0) try writer.writeAll(", "); @@ -246,10 +407,10 @@ pub const Type = enum(u32) { try writer.writeByte(')'); } }, - .integer => try writer.print("i{d}", .{type_item.data}), - 
.pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(type_item.data))}), + .integer => try writer.print("i{d}", .{item.data}), + .pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(item.data))}), .target => { - const extra = data.builder.typeExtraDataTrail(Type.Target, type_item.data); + const extra = data.builder.typeExtraDataTrail(Type.Target, item.data); const types: []const Type = @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.types_len]); const ints: []const u32 = @ptrCast(data.builder.type_extra.items[extra.end + @@ -262,26 +423,28 @@ pub const Type = enum(u32) { try writer.writeByte(')'); }, .vector => { - const extra = data.builder.typeExtraData(Type.Vector, type_item.data); + const extra = data.builder.typeExtraData(Type.Vector, item.data); try writer.print("<{d} x {%}>", .{ extra.len, extra.child.fmt(data.builder) }); }, .scalable_vector => { - const extra = data.builder.typeExtraData(Type.Vector, type_item.data); + const extra = data.builder.typeExtraData(Type.Vector, item.data); try writer.print("", .{ extra.len, extra.child.fmt(data.builder) }); }, .small_array => { - const extra = data.builder.typeExtraData(Type.Vector, type_item.data); + const extra = data.builder.typeExtraData(Type.Vector, item.data); try writer.print("[{d} x {%}]", .{ extra.len, extra.child.fmt(data.builder) }); }, .array => { - const extra = data.builder.typeExtraData(Type.Array, type_item.data); + const extra = data.builder.typeExtraData(Type.Array, item.data); try writer.print("[{d} x {%}]", .{ extra.len(), extra.child.fmt(data.builder) }); }, - .structure, .packed_structure => { - const extra = data.builder.typeExtraDataTrail(Type.Structure, type_item.data); + .structure, + .packed_structure, + => { + const extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); const fields: []const Type = @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]); - switch (type_item.tag) { + switch (item.tag) { .structure => {}, .packed_structure => try writer.writeByte('<'), else => unreachable, @@ -292,14 +455,14 @@ pub const Type = enum(u32) { try writer.print("{%}", .{field.fmt(data.builder)}); } try writer.writeAll(" }"); - switch (type_item.tag) { + switch (item.tag) { .structure => {}, .packed_structure => try writer.writeByte('>'), else => unreachable, } }, .named_structure => { - const extra = data.builder.typeExtraData(Type.NamedStructure, type_item.data); + const extra = data.builder.typeExtraData(Type.NamedStructure, item.data); if (comptime std.mem.eql(u8, fmt_str, "%")) try writer.print("%{}", .{ extra.id.fmt(data.builder), }) else switch (extra.body) { @@ -323,7 +486,7 @@ pub const Type = enum(u32) { }; pub const Linkage = enum { - default, + external, private, internal, available_externally, @@ -334,7 +497,6 @@ pub const Linkage = enum { extern_weak, linkonce_odr, weak_odr, - external, pub fn format( self: Linkage, @@ -342,14 +504,14 @@ pub const Linkage = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self != .default) try writer.print(" {s}", .{@tagName(self)}); + if (self != .external) try writer.print(" {s}", .{@tagName(self)}); } }; pub const Preemption = enum { - default, dso_preemptable, dso_local, + implicit_dso_local, pub fn format( self: Preemption, @@ -357,7 +519,7 @@ pub const Preemption = enum { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self != .default) try writer.print(" {s}", .{@tagName(self)}); + if (self == .dso_local) try 
writer.print(" {s}", .{@tagName(self)}); } }; @@ -554,22 +716,25 @@ pub const Alignment = enum(u6) { }; pub const Global = struct { - linkage: Linkage = .default, - preemption: Preemption = .default, + linkage: Linkage = .external, + preemption: Preemption = .dso_preemptable, visibility: Visibility = .default, dll_storage_class: DllStorageClass = .default, unnamed_addr: UnnamedAddr = .default, addr_space: AddrSpace = .default, externally_initialized: ExternallyInitialized = .default, type: Type, + section: String = .none, + partition: String = .none, alignment: Alignment = .default, kind: union(enum) { alias: Alias.Index, - object: Object.Index, + variable: Variable.Index, function: Function.Index, }, pub const Index = enum(u32) { + none = std.math.maxInt(u32), _, pub fn ptr(self: Index, builder: *Builder) *Global { @@ -580,11 +745,33 @@ pub const Global = struct { return &builder.globals.values()[@intFromEnum(self)]; } + pub fn toConst(self: Index) Constant { + return @enumFromInt(@intFromEnum(Constant.first_global) + @intFromEnum(self)); + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); return builder.llvm_globals.items[@intFromEnum(self)]; } + const FormatData = struct { + global: Index, + builder: *const Builder, + }; + fn format( + data: FormatData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + try writer.print("@{}", .{ + data.builder.globals.keys()[@intFromEnum(data.global)].fmt(data.builder), + }); + } + pub fn fmt(self: Index, builder: *const Builder) std.fmt.Formatter(format) { + return .{ .data = .{ .global = self, .builder = builder } }; + } + pub fn rename(self: Index, builder: *Builder, name: String) Allocator.Error!void { try builder.ensureUnusedCapacityGlobal(name); self.renameAssumeCapacity(builder, name); @@ -618,12 +805,32 @@ pub const Global = struct { builder.llvm_globals.items[index].setValueName2(slice.ptr, slice.len); } }; + + pub fn updateAttributes(self: *Global) void { + switch (self.linkage) { + .private, .internal => { + self.visibility = .default; + self.dll_storage_class = .default; + self.preemption = .implicit_dso_local; + }, + .extern_weak => if (self.preemption == .implicit_dso_local) { + self.preemption = .dso_local; + }, + else => switch (self.visibility) { + .default => if (self.preemption == .implicit_dso_local) { + self.preemption = .dso_local; + }, + else => self.preemption = .implicit_dso_local, + }, + } + } }; pub const Alias = struct { global: Global.Index, pub const Index = enum(u32) { + none = std.math.maxInt(u32), _, pub fn ptr(self: Index, builder: *Builder) *Alias { @@ -640,21 +847,22 @@ pub const Alias = struct { }; }; -pub const Object = struct { +pub const Variable = struct { global: Global.Index, thread_local: ThreadLocal = .default, mutability: enum { global, constant } = .global, init: Constant = .no_init, pub const Index = enum(u32) { + none = std.math.maxInt(u32), _, - pub fn ptr(self: Index, builder: *Builder) *Object { - return &builder.objects.items[@intFromEnum(self)]; + pub fn ptr(self: Index, builder: *Builder) *Variable { + return &builder.variables.items[@intFromEnum(self)]; } - pub fn ptrConst(self: Index, builder: *const Builder) *const Object { - return &builder.objects.items[@intFromEnum(self)]; + pub fn ptrConst(self: Index, builder: *const Builder) *const Variable { + return &builder.variables.items[@intFromEnum(self)]; } pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { @@ -670,6 +878,7 @@ 
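// [Editor's aside, not part of the patch] `Constant` packs two index spaces into one
// 32-bit enum: values below `first_global` (1 << 30) are interned constant items, and
// values at or above it are `Global.Index` references, which is why `Global.Index.toConst`
// adds the offset and `Constant.unwrap` subtracts it again. A standalone model of that
// encoding (hypothetical names, runnable on its own):
const std = @import("std");
const first_global: u32 = 1 << 30;
fn globalToConst(global_index: u32) u32 {
    return first_global + global_index;
}
fn unwrapConst(c: u32) union(enum) { constant: u32, global: u32 } {
    if (c < first_global) return .{ .constant = c };
    return .{ .global = c - first_global };
}
test "constant index encoding round-trips" {
    try std.testing.expectEqual(@as(u32, 7), unwrapConst(globalToConst(7)).global);
    try std.testing.expectEqual(@as(u32, 42), unwrapConst(42).constant);
}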
pub const Function = struct { blocks: std.ArrayListUnmanaged(Block) = .{}, pub const Index = enum(u32) { + none = std.math.maxInt(u32), _, pub fn ptr(self: Index, builder: *Builder) *Function { @@ -693,13 +902,13 @@ pub const Function = struct { block, }; - pub const Index = enum(u31) { _ }; + pub const Index = enum(u32) { _ }; }; pub const Block = struct { body: std.ArrayListUnmanaged(Instruction.Index) = .{}, - pub const Index = enum(u31) { _ }; + pub const Index = enum(u32) { _ }; }; pub fn deinit(self: *Function, gpa: Allocator) void { @@ -709,6 +918,36 @@ pub const Function = struct { } }; +pub const FloatCondition = enum(u4) { + oeq = 1, + ogt = 2, + oge = 3, + olt = 4, + ole = 5, + one = 6, + ord = 7, + uno = 8, + ueq = 9, + ugt = 10, + uge = 11, + ult = 12, + ule = 13, + une = 14, +}; + +pub const IntegerCondition = enum(u6) { + eq = 32, + ne = 33, + ugt = 34, + uge = 35, + ult = 36, + ule = 37, + sgt = 38, + sge = 39, + slt = 40, + sle = 41, +}; + pub const Constant = enum(u32) { false, true, @@ -719,15 +958,24 @@ pub const Constant = enum(u32) { const first_global: Constant = @enumFromInt(1 << 30); pub const Tag = enum(u6) { - integer_positive, - integer_negative, + positive_integer, + negative_integer, + half, + bfloat, + float, + double, + fp128, + x86_fp80, + ppc_fp128, null, none, structure, + packed_structure, array, + string, + string_null, vector, zeroinitializer, - global, undef, poison, blockaddress, @@ -747,6 +995,7 @@ pub const Constant = enum(u32) { bitcast, addrspacecast, getelementptr, + @"getelementptr inbounds", icmp, fcmp, extractelement, @@ -765,7 +1014,9 @@ pub const Constant = enum(u32) { pub const Item = struct { tag: Tag, - data: u32, + data: ExtraIndex, + + const ExtraIndex = u32; }; pub const Integer = packed struct(u64) { @@ -775,6 +1026,80 @@ pub const Constant = enum(u32) { pub const limbs = @divExact(@bitSizeOf(Integer), @bitSizeOf(std.math.big.Limb)); }; + pub const Double = struct { + lo: u32, + hi: u32, + }; + + pub const Fp80 = struct { + lo_lo: u32, + lo_hi: u32, + hi: u32, + }; + + pub const Fp128 = struct { + lo_lo: u32, + lo_hi: u32, + hi_lo: u32, + hi_hi: u32, + }; + + pub const Aggregate = struct { + type: Type, + }; + + pub const BlockAddress = extern struct { + function: Function.Index, + block: Function.Block.Index, + }; + + pub const FunctionReference = struct { + function: Function.Index, + }; + + pub const Cast = extern struct { + arg: Constant, + type: Type, + + pub const Signedness = enum { unsigned, signed, unneeded }; + }; + + pub const GetElementPtr = struct { + type: Type, + base: Constant, + indices_len: u32, + + pub const Kind = enum { normal, inbounds }; + }; + + pub const Compare = struct { + cond: u32, + lhs: Constant, + rhs: Constant, + }; + + pub const ExtractElement = struct { + arg: Constant, + index: Constant, + }; + + pub const InsertElement = struct { + arg: Constant, + elem: Constant, + index: Constant, + }; + + pub const ShuffleVector = struct { + lhs: Constant, + rhs: Constant, + mask: Constant, + }; + + pub const Binary = extern struct { + lhs: Constant, + rhs: Constant, + }; + pub fn unwrap(self: Constant) union(enum) { constant: u30, global: Global.Index, @@ -785,6 +1110,307 @@ pub const Constant = enum(u32) { .{ .global = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_global)) }; } + pub fn typeOf(self: Constant, builder: *Builder) Type { + switch (self.unwrap()) { + .constant => |constant| { + const item = builder.constant_items.get(constant); + return switch (item.tag) { + .positive_integer, + 
.negative_integer, + => @as( + *align(@alignOf(std.math.big.Limb)) Integer, + @ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]), + ).type, + .half => .half, + .bfloat => .bfloat, + .float => .float, + .double => .double, + .fp128 => .fp128, + .x86_fp80 => .x86_fp80, + .ppc_fp128 => .ppc_fp128, + .null, + .none, + .zeroinitializer, + .undef, + .poison, + => @enumFromInt(item.data), + .structure, + .packed_structure, + .array, + .vector, + => builder.constantExtraData(Aggregate, item.data).type, + .string, + .string_null, + => builder.arrayTypeAssumeCapacity( + @as(String, @enumFromInt(item.data)).toSlice(builder).?.len + + @intFromBool(item.tag == .string_null), + .i8, + ), + .blockaddress => builder.ptrTypeAssumeCapacity( + builder.constantExtraData(BlockAddress, item.data) + .function.ptrConst(builder).global.ptrConst(builder).addr_space, + ), + .dso_local_equivalent, + .no_cfi, + => builder.ptrTypeAssumeCapacity( + builder.constantExtraData(FunctionReference, item.data) + .function.ptrConst(builder).global.ptrConst(builder).addr_space, + ), + .trunc, + .zext, + .sext, + .fptrunc, + .fpext, + .fptoui, + .fptosi, + .uitofp, + .sitofp, + .ptrtoint, + .inttoptr, + .bitcast, + .addrspacecast, + => builder.constantExtraData(Cast, item.data).type, + .getelementptr, + .@"getelementptr inbounds", + => { + const extra = builder.constantExtraDataTrail(GetElementPtr, item.data); + const indices: []const Constant = @ptrCast(builder.constant_extra + .items[extra.end..][0..extra.data.indices_len]); + const base_ty = extra.data.base.typeOf(builder); + if (!base_ty.isVector(builder)) for (indices) |index| { + const index_ty = index.typeOf(builder); + if (!index_ty.isVector(builder)) continue; + switch (index_ty.vectorKind(builder)) { + inline else => |kind| return builder.vectorTypeAssumeCapacity( + kind, + index_ty.vectorLen(builder), + base_ty, + ), + } + }; + return base_ty; + }, + .icmp, .fcmp => { + const ty = builder.constantExtraData(Compare, item.data).lhs.typeOf(builder); + return switch (ty) { + .half, + .bfloat, + .float, + .double, + .fp128, + .x86_fp80, + .ppc_fp128, + .i1, + .i8, + .i16, + .i29, + .i32, + .i64, + .i80, + .i128, + => ty, + else => if (ty.isVector(builder)) switch (ty.vectorKind(builder)) { + inline else => |kind| builder + .vectorTypeAssumeCapacity(kind, ty.vectorLen(builder), .i1), + } else ty, + }; + }, + .extractelement => builder.constantExtraData(ExtractElement, item.data) + .arg.typeOf(builder).childType(builder), + .insertelement => builder.constantExtraData(InsertElement, item.data) + .arg.typeOf(builder), + .shufflevector => { + const extra = builder.constantExtraData(ShuffleVector, item.data); + const ty = extra.lhs.typeOf(builder); + return switch (ty.vectorKind(builder)) { + inline else => |kind| builder.vectorTypeAssumeCapacity( + kind, + extra.mask.typeOf(builder).vectorLen(builder), + ty.childType(builder), + ), + }; + }, + .add, + .sub, + .mul, + .shl, + .lshr, + .ashr, + .@"and", + .@"or", + .xor, + => builder.constantExtraData(Binary, item.data).lhs.typeOf(builder), + }; + }, + .global => |global| return builder.ptrTypeAssumeCapacity( + global.ptrConst(builder).addr_space, + ), + } + } + + pub fn isZeroInit(self: Constant, builder: *const Builder) bool { + switch (self.unwrap()) { + .constant => |constant| { + const item = builder.constant_items.get(constant); + return switch (item.tag) { + .positive_integer => { + const extra: *align(@alignOf(std.math.big.Limb)) Integer = + 
@ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]); + const limbs = builder.constant_limbs + .items[item.data + Integer.limbs ..][0..extra.limbs_len]; + return std.mem.eql(std.math.big.Limb, limbs, &.{0}); + }, + .half, .bfloat, .float => item.data == 0, + .double => { + const extra = builder.constantExtraData(Constant.Double, item.data); + return extra.lo == 0 and extra.hi == 0; + }, + .fp128, .ppc_fp128 => { + const extra = builder.constantExtraData(Constant.Fp128, item.data); + return extra.lo_lo == 0 and extra.lo_hi == 0 and + extra.hi_lo == 0 and extra.hi_hi == 0; + }, + .x86_fp80 => { + const extra = builder.constantExtraData(Constant.Fp80, item.data); + return extra.lo_lo == 0 and extra.lo_hi == 0 and extra.hi == 0; + }, + .vector => { + const extra = builder.constantExtraDataTrail(Aggregate, item.data); + const len = extra.data.type.aggregateLen(builder); + const vals: []const Constant = + @ptrCast(builder.constant_extra.items[extra.end..][0..len]); + for (vals) |val| if (!val.isZeroInit(builder)) return false; + return true; + }, + .null, .zeroinitializer => true, + else => false, + }; + }, + .global => return false, + } + } + + pub const FormatData = struct { + constant: Constant, + builder: *Builder, + }; + fn format( + data: FormatData, + comptime fmt_str: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (comptime std.mem.eql(u8, fmt_str, "%")) { + try writer.print("{%} ", .{data.constant.typeOf(data.builder).fmt(data.builder)}); + } else if (comptime std.mem.eql(u8, fmt_str, " ")) { + if (data.constant == .no_init) return; + try writer.writeByte(' '); + } + assert(data.constant != .no_init); + if (std.enums.tagName(Constant, data.constant)) |name| return writer.writeAll(name); + switch (data.constant.unwrap()) { + .constant => |constant| { + const item = data.builder.constant_items.get(constant); + switch (item.tag) { + .positive_integer, + .negative_integer, + => { + const extra: *align(@alignOf(std.math.big.Limb)) Integer = + @ptrCast(data.builder.constant_limbs.items[item.data..][0..Integer.limbs]); + const limbs = data.builder.constant_limbs + .items[item.data + Integer.limbs ..][0..extra.limbs_len]; + const bigint = std.math.big.int.Const{ + .limbs = limbs, + .positive = item.tag == .positive_integer, + }; + const ExpectedContents = extern struct { + string: [(64 * 8 / std.math.log2(10)) + 2]u8, + limbs: [ + std.math.big.int.calcToStringLimbsBufferLen( + 64 / @sizeOf(std.math.big.Limb), + 10, + ) + ]std.math.big.Limb, + }; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), data.builder.gpa); + const allocator = stack.get(); + const str = bigint.toStringAlloc(allocator, 10, undefined) catch + return writer.writeAll("..."); + defer allocator.free(str); + try writer.writeAll(str); + }, + .null, + .none, + .zeroinitializer, + .undef, + .poison, + => try writer.writeAll(@tagName(item.tag)), + .structure, + .packed_structure, + .array, + .vector, + => { + const extra = data.builder.constantExtraDataTrail(Aggregate, item.data); + const len = extra.data.type.aggregateLen(data.builder); + const vals: []const Constant = + @ptrCast(data.builder.constant_extra.items[extra.end..][0..len]); + + try writer.writeAll(switch (item.tag) { + .structure => "{ ", + .packed_structure => "<{ ", + .array => "[", + .vector => "<", + else => unreachable, + }); + for (vals, 0..) 
|val, index| { + if (index > 0) try writer.writeAll(", "); + try writer.print("{%}", .{val.fmt(data.builder)}); + } + try writer.writeAll(switch (item.tag) { + .structure => " }", + .packed_structure => " }>", + .array => "]", + .vector => ">", + else => unreachable, + }); + }, + .string => try writer.print( + \\c{"} + , .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}), + .string_null => try writer.print( + \\c{"@} + , .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}), + .blockaddress => { + const extra = data.builder.constantExtraData(BlockAddress, item.data); + const function = extra.function.ptrConst(data.builder); + try writer.print("{s}({}, %{d})", .{ + @tagName(item.tag), + function.global.fmt(data.builder), + @intFromEnum(extra.block), // TODO + }); + }, + .dso_local_equivalent, + .no_cfi, + => { + const extra = data.builder.constantExtraData(FunctionReference, item.data); + try writer.print("{s} {}", .{ + @tagName(item.tag), + extra.function.ptrConst(data.builder).global.fmt(data.builder), + }); + }, + else => try writer.print("<{s}:0x{X}>", .{ + @tagName(item.tag), @intFromEnum(data.constant), + }), + } + }, + .global => |global| try writer.print("{}", .{global.fmt(data.builder)}), + } + } + pub fn fmt(self: Constant, builder: *Builder) std.fmt.Formatter(format) { + return .{ .data = .{ .constant = self, .builder = builder } }; + } + pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); return switch (self.unwrap()) { @@ -813,7 +1439,6 @@ pub const Value = enum(u32) { pub fn init(self: *Builder) Allocator.Error!void { try self.string_indices.append(self.gpa, 0); assert(try self.string("") == .empty); - assert(try self.string("debugme") == .debugme); { const static_len = @typeInfo(Type).Enum.fields.len - 1; @@ -821,10 +1446,9 @@ pub fn init(self: *Builder) Allocator.Error!void { try self.type_items.ensureTotalCapacity(self.gpa, static_len); if (self.useLibLlvm()) try self.llvm_types.ensureTotalCapacity(self.gpa, static_len); inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| { - const result = self.typeNoExtraAssumeCapacity(.{ - .tag = .simple, - .data = simple_field.value, - }); + const result = self.getOrPutTypeNoExtraAssumeCapacity( + .{ .tag = .simple, .data = simple_field.value }, + ); assert(result.new and result.type == @field(Type, simple_field.name)); if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( @field(llvm.Context, simple_field.name ++ "Type")(self.llvm_context), @@ -838,6 +1462,7 @@ pub fn init(self: *Builder) Allocator.Error!void { assert(try self.intConst(.i1, 0) == .false); assert(try self.intConst(.i1, 1) == .true); + assert(try self.noneConst(.token) == .none); } pub fn deinit(self: *Builder) void { @@ -858,7 +1483,7 @@ pub fn deinit(self: *Builder) void { self.globals.deinit(self.gpa); self.next_unique_global_id.deinit(self.gpa); self.aliases.deinit(self.gpa); - self.objects.deinit(self.gpa); + self.variables.deinit(self.gpa); for (self.functions.items) |*function| function.deinit(self.gpa); self.functions.deinit(self.gpa); @@ -1110,19 +1735,19 @@ pub fn fnType( params: []const Type, kind: Type.Function.Kind, ) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, Type.Function, params.len); + try self.ensureUnusedTypeCapacity(1, Type.Function, params.len); return switch (kind) { inline else => |comptime_kind| self.fnTypeAssumeCapacity(ret, params, comptime_kind), }; } pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { - try 
self.ensureUnusedCapacityTypes(1, null, 0); + try self.ensureUnusedTypeCapacity(1, null, 0); return self.intTypeAssumeCapacity(bits); } pub fn ptrType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, null, 0); + try self.ensureUnusedTypeCapacity(1, null, 0); return self.ptrTypeAssumeCapacity(addr_space); } @@ -1132,7 +1757,7 @@ pub fn vectorType( len: u32, child: Type, ) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, Type.Vector, 0); + try self.ensureUnusedTypeCapacity(1, Type.Vector, 0); return switch (kind) { inline else => |comptime_kind| self.vectorTypeAssumeCapacity(comptime_kind, len, child), }; @@ -1140,7 +1765,7 @@ pub fn vectorType( pub fn arrayType(self: *Builder, len: u64, child: Type) Allocator.Error!Type { comptime assert(@sizeOf(Type.Array) >= @sizeOf(Type.Vector)); - try self.ensureUnusedCapacityTypes(1, Type.Array, 0); + try self.ensureUnusedTypeCapacity(1, Type.Array, 0); return self.arrayTypeAssumeCapacity(len, child); } @@ -1149,7 +1774,7 @@ pub fn structType( kind: Type.Structure.Kind, fields: []const Type, ) Allocator.Error!Type { - try self.ensureUnusedCapacityTypes(1, Type.Structure, fields.len); + try self.ensureUnusedTypeCapacity(1, Type.Structure, fields.len); return switch (kind) { inline else => |comptime_kind| self.structTypeAssumeCapacity(comptime_kind, fields), }; @@ -1162,7 +1787,7 @@ pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type { try self.string_indices.ensureUnusedCapacity(self.gpa, 1); try self.types.ensureUnusedCapacity(self.gpa, 1); try self.next_unique_type_id.ensureUnusedCapacity(self.gpa, 1); - try self.ensureUnusedCapacityTypes(1, Type.NamedStructure, 0); + try self.ensureUnusedTypeCapacity(1, Type.NamedStructure, 0); return self.opaqueTypeAssumeCapacity(name); } @@ -1181,8 +1806,7 @@ pub fn namedTypeSetBody( @ptrCast(self.type_extra.items[body_extra.end..][0..body_extra.data.fields_len]); const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len); defer self.gpa.free(llvm_fields); - for (llvm_fields, body_fields) |*llvm_field, body_field| - llvm_field.* = self.llvm_types.items[@intFromEnum(body_field)]; + for (llvm_fields, body_fields) |*llvm_field, body_field| llvm_field.* = body_field.toLlvm(self); self.llvm_types.items[@intFromEnum(named_type)].structSetBody( llvm_fields.ptr, @intCast(llvm_fields.len), @@ -1196,11 +1820,13 @@ pub fn namedTypeSetBody( } pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { + try self.ensureUnusedTypeCapacity(1, null, 0); try self.ensureUnusedCapacityGlobal(name); return self.addGlobalAssumeCapacity(name, global); } pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Global.Index { + _ = self.ptrTypeAssumeCapacity(global.addr_space); var id = name; if (id == .none) { id = self.next_unnamed_global; @@ -1210,6 +1836,7 @@ pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Glo const global_gop = self.globals.getOrPutAssumeCapacity(id); if (!global_gop.found_existing) { global_gop.value_ptr.* = global; + global_gop.value_ptr.updateAttributes(); const index: Global.Index = @enumFromInt(global_gop.index); index.updateName(self); return index; @@ -1246,6 +1873,207 @@ pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allo return self.bigIntConstAssumeCapacity(ty, value); } +pub fn fpConst(self: *Builder, ty: Type, comptime val: comptime_float) Allocator.Error!Constant { + return switch (ty) { + .half => try 
self.halfConst(val), + .bfloat => try self.bfloatConst(val), + .float => try self.floatConst(val), + .double => try self.doubleConst(val), + .fp128 => try self.fp128Const(val), + .x86_fp80 => try self.x86_fp80Const(val), + .ppc_fp128 => try self.ppc_fp128Const(.{ val, 0 }), + else => unreachable, + }; +} + +pub fn halfConst(self: *Builder, val: f16) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.halfConstAssumeCapacity(val); +} + +pub fn bfloatConst(self: *Builder, val: f32) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.bfloatConstAssumeCapacity(val); +} + +pub fn floatConst(self: *Builder, val: f32) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.floatConstAssumeCapacity(val); +} + +pub fn doubleConst(self: *Builder, val: f64) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Double, 0); + return self.doubleConstAssumeCapacity(val); +} + +pub fn fp128Const(self: *Builder, val: f128) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); + return self.fp128ConstAssumeCapacity(val); +} + +pub fn x86_fp80Const(self: *Builder, val: f80) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Fp80, 0); + return self.x86_fp80ConstAssumeCapacity(val); +} + +pub fn ppc_fp128Const(self: *Builder, val: [2]f64) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); + return self.ppc_fp128ConstAssumeCapacity(val); +} + +pub fn nullConst(self: *Builder, ty: Type) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.nullConstAssumeCapacity(ty); +} + +pub fn noneConst(self: *Builder, ty: Type) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.noneConstAssumeCapacity(ty); +} + +pub fn structConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len); + return self.structConstAssumeCapacity(ty, vals); +} + +pub fn arrayConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len); + return self.arrayConstAssumeCapacity(ty, vals); +} + +pub fn stringConst(self: *Builder, val: String) Allocator.Error!Constant { + try self.ensureUnusedTypeCapacity(1, Type.Array, 0); + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.stringConstAssumeCapacity(val); +} + +pub fn stringNullConst(self: *Builder, val: String) Allocator.Error!Constant { + try self.ensureUnusedTypeCapacity(1, Type.Array, 0); + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.stringNullConstAssumeCapacity(val); +} + +pub fn vectorConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len); + return self.vectorConstAssumeCapacity(ty, vals); +} + +pub fn zeroInitConst(self: *Builder, ty: Type) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.zeroInitConstAssumeCapacity(ty); +} + +pub fn undefConst(self: *Builder, ty: Type) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, null, 0); + return self.undefConstAssumeCapacity(ty); +} + +pub fn poisonConst(self: *Builder, ty: Type) Allocator.Error!Constant { + try 
self.ensureUnusedConstantCapacity(1, null, 0); + return self.poisonConstAssumeCapacity(ty); +} + +pub fn blockAddrConst( + self: *Builder, + function: Function.Index, + block: Function.Block.Index, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.BlockAddress, 0); + return self.blockAddrConstAssumeCapacity(function, block); +} + +pub fn dsoLocalEquivalentConst(self: *Builder, function: Function.Index) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.FunctionReference, 0); + return self.dsoLocalEquivalentConstAssumeCapacity(function); +} + +pub fn noCfiConst(self: *Builder, function: Function.Index) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.FunctionReference, 0); + return self.noCfiConstAssumeCapacity(function); +} + +pub fn convConst( + self: *Builder, + signedness: Constant.Cast.Signedness, + arg: Constant, + ty: Type, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0); + return self.convConstAssumeCapacity(signedness, arg, ty); +} + +pub fn castConst(self: *Builder, tag: Constant.Tag, arg: Constant, ty: Type) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0); + return self.castConstAssumeCapacity(tag, arg, ty); +} + +pub fn gepConst( + self: *Builder, + comptime kind: Constant.GetElementPtr.Kind, + ty: Type, + base: Constant, + indices: []const Constant, +) Allocator.Error!Constant { + try self.ensureUnusedTypeCapacity(1, Type.Vector, 0); + try self.ensureUnusedConstantCapacity(1, Constant.GetElementPtr, indices.len); + return self.gepConstAssumeCapacity(kind, ty, base, indices); +} + +pub fn icmpConst( + self: *Builder, + cond: IntegerCondition, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Compare, 0); + return self.icmpConstAssumeCapacity(cond, lhs, rhs); +} + +pub fn fcmpConst( + self: *Builder, + cond: FloatCondition, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Compare, 0); + return self.icmpConstAssumeCapacity(cond, lhs, rhs); +} + +pub fn extractElementConst(self: *Builder, arg: Constant, index: Constant) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.ExtractElement, 0); + return self.extractElementConstAssumeCapacity(arg, index); +} + +pub fn insertElementConst( + self: *Builder, + arg: Constant, + elem: Constant, + index: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.InsertElement, 0); + return self.insertElementConstAssumeCapacity(arg, elem, index); +} + +pub fn shuffleVectorConst( + self: *Builder, + lhs: Constant, + rhs: Constant, + mask: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.ShuffleVector, 0); + return self.shuffleVectorConstAssumeCapacity(lhs, rhs, mask); +} + +pub fn binConst( + self: *Builder, + tag: Constant.Tag, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Binary, 0); + return self.binConstAssumeCapacity(tag, lhs, rhs); +} + pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { if (self.source_filename != .none) try writer.print( \\; ModuleID = '{s}' @@ -1266,43 +2094,44 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { \\ , .{ id.fmt(self), ty.fmt(self) }); try writer.writeByte('\n'); - for 
(self.objects.items) |object| { - const global = self.globals.entries.get(@intFromEnum(object.global)); + for (self.variables.items) |variable| { + const global = self.globals.values()[@intFromEnum(variable.global)]; try writer.print( - \\@{} ={}{}{}{}{}{}{}{} {s} {%}{,} + \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{,} \\ , .{ - global.key.fmt(self), - global.value.linkage, - global.value.preemption, - global.value.visibility, - global.value.dll_storage_class, - object.thread_local, - global.value.unnamed_addr, - global.value.addr_space, - global.value.externally_initialized, - @tagName(object.mutability), - global.value.type.fmt(self), - global.value.alignment, + variable.global.fmt(self), + global.linkage, + global.preemption, + global.visibility, + global.dll_storage_class, + variable.thread_local, + global.unnamed_addr, + global.addr_space, + global.externally_initialized, + @tagName(variable.mutability), + global.type.fmt(self), + variable.init.fmt(self), + global.alignment, }); } try writer.writeByte('\n'); for (self.functions.items) |function| { - const global = self.globals.entries.get(@intFromEnum(function.global)); - const item = self.type_items.items[@intFromEnum(global.value.type)]; + const global = self.globals.values()[@intFromEnum(function.global)]; + const item = self.type_items.items[@intFromEnum(global.type)]; const extra = self.typeExtraDataTrail(Type.Function, item.data); const params: []const Type = @ptrCast(self.type_extra.items[extra.end..][0..extra.data.params_len]); try writer.print( - \\{s} {}{}{}{}{} @{}( + \\{s}{}{}{}{} {} {}( , .{ if (function.body) |_| "define" else "declare", - global.value.linkage, - global.value.preemption, - global.value.visibility, - global.value.dll_storage_class, + global.linkage, + global.preemption, + global.visibility, + global.dll_storage_class, extra.data.ret.fmt(self), - global.key.fmt(self), + function.global.fmt(self), }); for (params, 0..) |param, index| { if (index > 0) try writer.writeAll(", "); @@ -1316,65 +2145,36 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { }, else => unreachable, } - try writer.print(") {}{}", .{ - global.value.unnamed_addr, - global.value.alignment, - }); + try writer.print(") {}{}", .{ global.unnamed_addr, global.alignment }); if (function.body) |_| try writer.print( \\{{ \\ ret {%} \\}} \\ - , .{ - extra.data.ret.fmt(self), - }); + , .{extra.data.ret.fmt(self)}); try writer.writeByte('\n'); } } +fn isValidIdentifier(id: []const u8) bool { + for (id, 0..) 
|character, index| switch (character) { + '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {}, + '0'...'9' => if (index == 0) return false, + else => return false, + }; + return true; +} + fn ensureUnusedCapacityGlobal(self: *Builder, name: String) Allocator.Error!void { if (self.useLibLlvm()) try self.llvm_globals.ensureUnusedCapacity(self.gpa, 1); try self.string_map.ensureUnusedCapacity(self.gpa, 1); - try self.string_bytes.ensureUnusedCapacity(self.gpa, name.toSlice(self).?.len + + if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)})); try self.string_indices.ensureUnusedCapacity(self.gpa, 1); try self.globals.ensureUnusedCapacity(self.gpa, 1); try self.next_unique_global_id.ensureUnusedCapacity(self.gpa, 1); } -fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.ExtraIndex { - const result: Type.ExtraIndex = @intCast(self.type_extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { - const value = @field(extra, field.name); - self.type_extra.appendAssumeCapacity(switch (field.type) { - u32 => value, - String, Type => @intFromEnum(value), - else => @compileError("bad field type: " ++ @typeName(field.type)), - }); - } - return result; -} - -fn typeExtraDataTrail( - self: *const Builder, - comptime T: type, - index: Type.ExtraIndex, -) struct { data: T, end: Type.ExtraIndex } { - var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; - inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, data| - @field(result, field.name) = switch (field.type) { - u32 => data, - String, Type => @enumFromInt(data), - else => @compileError("bad field type: " ++ @typeName(field.type)), - }; - return .{ .data = result, .end = index + @as(Type.ExtraIndex, @intCast(fields.len)) }; -} - -fn typeExtraData(self: *const Builder, comptime T: type, index: Type.ExtraIndex) T { - return self.typeExtraDataTrail(T, index).data; -} - fn fnTypeAssumeCapacity( self: *Builder, ret: Type, @@ -1394,17 +2194,19 @@ fn fnTypeAssumeCapacity( hasher.update(std.mem.sliceAsBytes(key.params)); return @truncate(hasher.final()); } - pub fn eql(ctx: @This(), lhs: Key, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Function, rhs_data.data); const rhs_params: []const Type = @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.params_len]); - return rhs_data.tag == tag and lhs.ret == rhs_extra.data.ret and - std.mem.eql(Type, lhs.params, rhs_params); + return rhs_data.tag == tag and lhs_key.ret == rhs_extra.data.ret and + std.mem.eql(Type, lhs_key.params, rhs_params); } }; - const data = Key{ .ret = ret, .params = params }; - const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + const gop = self.type_map.getOrPutAssumeCapacityAdapted( + Key{ .ret = ret, .params = params }, + Adapter{ .builder = self }, + ); if (!gop.found_existing) { gop.key_ptr.* = {}; gop.value_ptr.* = {}; @@ -1436,17 +2238,16 @@ fn fnTypeAssumeCapacity( fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { assert(bits > 0); - const result = self.typeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); + const result = self.getOrPutTypeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); if (self.useLibLlvm() and result.new) 
self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits)); return result.type; } fn ptrTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type { - const result = self.typeNoExtraAssumeCapacity(.{ - .tag = .pointer, - .data = @intFromEnum(addr_space), - }); + const result = self.getOrPutTypeNoExtraAssumeCapacity( + .{ .tag = .pointer, .data = @intFromEnum(addr_space) }, + ); if (self.useLibLlvm() and result.new) self.llvm_types.appendAssumeCapacity(self.llvm_context.pointerType(@intFromEnum(addr_space))); return result.type; @@ -1470,10 +2271,10 @@ fn vectorTypeAssumeCapacity( std.mem.asBytes(&key), )); } - pub fn eql(ctx: @This(), lhs: Type.Vector, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: Type.Vector, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; return rhs_data.tag == tag and - std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Vector, rhs_data.data)); + std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Vector, rhs_data.data)); } }; const data = Type.Vector{ .len = len, .child = child }; @@ -1503,10 +2304,10 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type { std.mem.asBytes(&key), )); } - pub fn eql(ctx: @This(), lhs: Type.Vector, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: Type.Vector, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; return rhs_data.tag == .small_array and - std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Vector, rhs_data.data)); + std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Vector, rhs_data.data)); } }; const data = Type.Vector{ .len = small_len, .child = child }; @@ -1532,10 +2333,10 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type { std.mem.asBytes(&key), )); } - pub fn eql(ctx: @This(), lhs: Type.Array, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: Type.Array, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; return rhs_data.tag == .array and - std.meta.eql(lhs, ctx.builder.typeExtraData(Type.Array, rhs_data.data)); + std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Array, rhs_data.data)); } }; const data = Type.Array{ @@ -1576,12 +2377,12 @@ fn structTypeAssumeCapacity( std.mem.sliceAsBytes(key), )); } - pub fn eql(ctx: @This(), lhs: []const Type, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: []const Type, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Structure, rhs_data.data); const rhs_fields: []const Type = @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.fields_len]); - return rhs_data.tag == tag and std.mem.eql(Type, lhs, rhs_fields); + return rhs_data.tag == tag and std.mem.eql(Type, lhs_key, rhs_fields); } }; const gop = self.type_map.getOrPutAssumeCapacityAdapted(fields, Adapter{ .builder = self }); @@ -1596,15 +2397,14 @@ fn structTypeAssumeCapacity( }); self.type_extra.appendSliceAssumeCapacity(@ptrCast(fields)); if (self.useLibLlvm()) { - const ExpectedContents = [32]*llvm.Type; + const ExpectedContents = [expected_fields_len]*llvm.Type; var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); const allocator = stack.get(); const llvm_fields = try allocator.alloc(*llvm.Type, fields.len); defer allocator.free(llvm_fields); - for (llvm_fields, fields) |*llvm_field, field| - 
llvm_field.* = self.llvm_types.items[@intFromEnum(field)]; + for (llvm_fields, fields) |*llvm_field, field| llvm_field.* = field.toLlvm(self); self.llvm_types.appendAssumeCapacity(self.llvm_context.structType( llvm_fields.ptr, @@ -1628,10 +2428,10 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { std.mem.asBytes(&key), )); } - pub fn eql(ctx: @This(), lhs: String, _: void, rhs_index: usize) bool { + pub fn eql(ctx: @This(), lhs_key: String, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; return rhs_data.tag == .named_structure and - lhs == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id; + lhs_key == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id; } }; var id = name; @@ -1669,7 +2469,7 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { } } -fn ensureUnusedCapacityTypes( +fn ensureUnusedTypeCapacity( self: *Builder, count: usize, comptime Extra: ?type, @@ -1680,11 +2480,11 @@ fn ensureUnusedCapacityTypes( if (Extra) |E| try self.type_extra.ensureUnusedCapacity( self.gpa, count * (@typeInfo(E).Struct.fields.len + trail_len), - ); + ) else assert(trail_len == 0); if (self.useLibLlvm()) try self.llvm_types.ensureUnusedCapacity(self.gpa, count); } -fn typeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } { +fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } { const Adapter = struct { builder: *const Builder, pub fn hash(_: @This(), key: Type.Item) u32 { @@ -1693,8 +2493,8 @@ fn typeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool std.mem.asBytes(&key), )); } - pub fn eql(ctx: @This(), lhs: Type.Item, _: void, rhs_index: usize) bool { - const lhs_bits: u32 = @bitCast(lhs); + pub fn eql(ctx: @This(), lhs_key: Type.Item, _: void, rhs_index: usize) bool { + const lhs_bits: u32 = @bitCast(lhs_key); const rhs_bits: u32 = @bitCast(ctx.builder.type_items.items[rhs_index]); return lhs_bits == rhs_bits; } @@ -1708,13 +2508,37 @@ fn typeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool return .{ .new = !gop.found_existing, .type = @enumFromInt(gop.index) }; } -fn isValidIdentifier(id: []const u8) bool { - for (id, 0..) 
|character, index| switch (character) { - '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {}, - '0'...'9' => if (index == 0) return false, - else => return false, - }; - return true; +fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraIndex { + const result: Type.Item.ExtraIndex = @intCast(self.type_extra.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + const value = @field(extra, field.name); + self.type_extra.appendAssumeCapacity(switch (field.type) { + u32 => value, + String, Type => @intFromEnum(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }); + } + return result; +} + +fn typeExtraDataTrail( + self: *const Builder, + comptime T: type, + index: Type.Item.ExtraIndex, +) struct { data: T, end: Type.Item.ExtraIndex } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, data| + @field(result, field.name) = switch (field.type) { + u32 => data, + String, Type => @enumFromInt(data), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; +} + +fn typeExtraData(self: *const Builder, comptime T: type, index: Type.Item.ExtraIndex) T { + return self.typeExtraDataTrail(T, index).data; } fn bigIntConstAssumeCapacity( @@ -1748,8 +2572,8 @@ fn bigIntConstAssumeCapacity( const ExtraPtr = *align(@alignOf(std.math.big.Limb)) Constant.Integer; const Key = struct { tag: Constant.Tag, type: Type, limbs: []const std.math.big.Limb }; const tag: Constant.Tag = switch (canonical_value.positive) { - true => .integer_positive, - false => .integer_negative, + true => .positive_integer, + false => .negative_integer, }; const Adapter = struct { builder: *const Builder, @@ -1759,20 +2583,22 @@ fn bigIntConstAssumeCapacity( hasher.update(std.mem.sliceAsBytes(key.limbs)); return @truncate(hasher.final()); } - pub fn eql(ctx: @This(), lhs: Key, _: void, rhs_index: usize) bool { - if (lhs.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; - const rhs_extra: ExtraPtr = @ptrCast( - ctx.builder.constant_limbs.items[rhs_data..][0..Constant.Integer.limbs], - ); + const rhs_extra: ExtraPtr = + @ptrCast(ctx.builder.constant_limbs.items[rhs_data..][0..Constant.Integer.limbs]); const rhs_limbs = ctx.builder.constant_limbs .items[rhs_data + Constant.Integer.limbs ..][0..rhs_extra.limbs_len]; - return lhs.type == rhs_extra.type and std.mem.eql(std.math.big.Limb, lhs.limbs, rhs_limbs); + return lhs_key.type == rhs_extra.type and + std.mem.eql(std.math.big.Limb, lhs_key.limbs, rhs_limbs); } }; - const data = Key{ .tag = tag, .type = ty, .limbs = canonical_value.limbs }; - const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + const gop = self.constant_map.getOrPutAssumeCapacityAdapted( + Key{ .tag = tag, .type = ty, .limbs = canonical_value.limbs }, + Adapter{ .builder = self }, + ); if (!gop.found_existing) { gop.key_ptr.* = {}; gop.value_ptr.* = {}; @@ -1780,9 +2606,8 @@ fn bigIntConstAssumeCapacity( .tag = tag, .data = @intCast(self.constant_limbs.items.len), }); - const extra: ExtraPtr = @ptrCast( - 
self.constant_limbs.addManyAsArrayAssumeCapacity(Constant.Integer.limbs), - ); + const extra: ExtraPtr = + @ptrCast(self.constant_limbs.addManyAsArrayAssumeCapacity(Constant.Integer.limbs)); extra.* = .{ .type = ty, .limbs_len = @intCast(canonical_value.limbs.len) }; self.constant_limbs.appendSliceAssumeCapacity(canonical_value.limbs); if (self.useLibLlvm()) { @@ -1827,6 +2652,870 @@ fn bigIntConstAssumeCapacity( return @enumFromInt(gop.index); } +fn halfConstAssumeCapacity(self: *Builder, val: f16) Constant { + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .half, .data = @as(u16, @bitCast(val)) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (std.math.isSignalNan(val)) + Type.i16.toLlvm(self).constInt(@as(u16, @bitCast(val)), .False) + .constBitCast(Type.half.toLlvm(self)) + else + Type.half.toLlvm(self).constReal(val), + ); + return result.constant; +} + +fn bfloatConstAssumeCapacity(self: *Builder, val: f32) Constant { + assert(@as(u16, @truncate(@as(u32, @bitCast(val)))) == 0); + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .bfloat, .data = @bitCast(val) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (std.math.isSignalNan(val)) + Type.i16.toLlvm(self).constInt(@as(u32, @bitCast(val)) >> 16, .False) + .constBitCast(Type.bfloat.toLlvm(self)) + else + Type.bfloat.toLlvm(self).constReal(val), + ); + + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(Type.bfloat.toLlvm(self).constReal(val)); + return result.constant; +} + +fn floatConstAssumeCapacity(self: *Builder, val: f32) Constant { + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .float, .data = @bitCast(val) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (std.math.isSignalNan(val)) + Type.i32.toLlvm(self).constInt(@as(u32, @bitCast(val)), .False) + .constBitCast(Type.float.toLlvm(self)) + else + Type.float.toLlvm(self).constReal(val), + ); + return result.constant; +} + +fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: f64) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.double)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: f64, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .double) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Double, rhs_data); + return @as(u64, @bitCast(lhs_key)) == @as(u64, rhs_extra.hi) << 32 | rhs_extra.lo; + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .double, + .data = self.addConstantExtraAssumeCapacity(Constant.Double{ + .lo = @intCast(@as(u64, @bitCast(val)) >> 32), + .hi = @truncate(@as(u64, @bitCast(val))), + }), + }); + if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity( + if (std.math.isSignalNan(val)) + Type.i64.toLlvm(self).constInt(@as(u64, @bitCast(val)), .False) + .constBitCast(Type.double.toLlvm(self)) + else + Type.double.toLlvm(self).constReal(val), + ); + } + return @enumFromInt(gop.index); +} + +fn fp128ConstAssumeCapacity(self: 
*Builder, val: f128) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: f128) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.fp128)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: f128, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .fp128) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Fp128, rhs_data); + return @as(u128, @bitCast(lhs_key)) == @as(u128, rhs_extra.hi_hi) << 96 | + @as(u128, rhs_extra.hi_lo) << 64 | @as(u128, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo; + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .fp128, + .data = self.addConstantExtraAssumeCapacity(Constant.Fp128{ + .lo_lo = @truncate(@as(u128, @bitCast(val))), + .lo_hi = @truncate(@as(u128, @bitCast(val)) >> 32), + .hi_lo = @truncate(@as(u128, @bitCast(val)) >> 64), + .hi_hi = @intCast(@as(u128, @bitCast(val)) >> 96), + }), + }); + if (self.useLibLlvm()) { + const llvm_limbs = [_]u64{ + @truncate(@as(u128, @bitCast(val))), + @intCast(@as(u128, @bitCast(val)) >> 64), + }; + self.llvm_constants.appendAssumeCapacity( + Type.i128.toLlvm(self) + .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs) + .constBitCast(Type.fp128.toLlvm(self)), + ); + } + } + return @enumFromInt(gop.index); +} + +fn x86_fp80ConstAssumeCapacity(self: *Builder, val: f80) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: f80) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.x86_fp80)), + std.mem.asBytes(&key)[0..10], + )); + } + pub fn eql(ctx: @This(), lhs_key: f80, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .x86_fp80) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Fp80, rhs_data); + return @as(u80, @bitCast(lhs_key)) == @as(u80, rhs_extra.hi) << 64 | + @as(u80, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo; + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .x86_fp80, + .data = self.addConstantExtraAssumeCapacity(Constant.Fp80{ + .lo_lo = @truncate(@as(u80, @bitCast(val))), + .lo_hi = @truncate(@as(u80, @bitCast(val)) >> 32), + .hi = @intCast(@as(u80, @bitCast(val)) >> 64), + }), + }); + if (self.useLibLlvm()) { + const llvm_limbs = [_]u64{ + @truncate(@as(u80, @bitCast(val))), + @intCast(@as(u80, @bitCast(val)) >> 64), + }; + self.llvm_constants.appendAssumeCapacity( + Type.i80.toLlvm(self) + .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs) + .constBitCast(Type.x86_fp80.toLlvm(self)), + ); + } + } + return @enumFromInt(gop.index); +} + +fn ppc_fp128ConstAssumeCapacity(self: *Builder, val: [2]f64) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: [2]f64) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.ppc_fp128)), + std.mem.asBytes(&key), + )); + } + pub fn 
eql(ctx: @This(), lhs_key: [2]f64, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .ppc_fp128) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Fp128, rhs_data); + return @as(u64, @bitCast(lhs_key[0])) == @as(u64, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo and + @as(u64, @bitCast(lhs_key[1])) == @as(u64, rhs_extra.hi_hi) << 32 | rhs_extra.hi_lo; + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .ppc_fp128, + .data = self.addConstantExtraAssumeCapacity(Constant.Fp128{ + .lo_lo = @truncate(@as(u64, @bitCast(val[0]))), + .lo_hi = @intCast(@as(u64, @bitCast(val[0])) >> 32), + .hi_lo = @truncate(@as(u64, @bitCast(val[1]))), + .hi_hi = @intCast(@as(u64, @bitCast(val[1])) >> 32), + }), + }); + if (self.useLibLlvm()) { + const llvm_limbs: *const [2]u64 = @ptrCast(&val); + self.llvm_constants.appendAssumeCapacity( + Type.i128.toLlvm(self) + .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs) + .constBitCast(Type.ppc_fp128.toLlvm(self)), + ); + } + } + return @enumFromInt(gop.index); +} + +fn nullConstAssumeCapacity(self: *Builder, ty: Type) Constant { + assert(self.type_items.items[@intFromEnum(ty)].tag == .pointer); + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .null, .data = @intFromEnum(ty) }, + ); + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + return result.constant; +} + +fn noneConstAssumeCapacity(self: *Builder, ty: Type) Constant { + assert(ty == .token); + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .none, .data = @intFromEnum(ty) }, + ); + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + return result.constant; +} + +fn structConstAssumeCapacity( + self: *Builder, + ty: Type, + vals: []const Constant, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + const type_item = self.type_items.items[@intFromEnum(ty)]; + const extra = self.typeExtraDataTrail(Type.Structure, switch (type_item.tag) { + .structure, .packed_structure => type_item.data, + .named_structure => data: { + const body_ty = self.typeExtraData(Type.NamedStructure, type_item.data).body; + const body_item = self.type_items.items[@intFromEnum(body_ty)]; + switch (body_item.tag) { + .structure, .packed_structure => break :data body_item.data, + else => unreachable, + } + }, + else => unreachable, + }); + const fields: []const Type = + @ptrCast(self.type_extra.items[extra.end..][0..extra.data.fields_len]); + for (fields, vals) |field, val| assert(field == val.typeOf(self)); + + for (vals) |val| { + if (!val.isZeroInit(self)) break; + } else return self.zeroInitConstAssumeCapacity(ty); + + const tag: Constant.Tag = switch (ty.unnamedTag(self)) { + .structure => .structure, + .packed_structure => .packed_structure, + else => unreachable, + }; + const result = self.getOrPutConstantAggregateAssumeCapacity(tag, ty, vals); + if (self.useLibLlvm() and result.new) { + const ExpectedContents = [expected_fields_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_vals = try 
allocator.alloc(*llvm.Value, vals.len); + defer allocator.free(llvm_vals); + for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self); + + self.llvm_constants.appendAssumeCapacity( + ty.toLlvm(self).constNamedStruct(llvm_vals.ptr, @intCast(llvm_vals.len)), + ); + } + return result.constant; +} + +fn arrayConstAssumeCapacity( + self: *Builder, + ty: Type, + vals: []const Constant, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + const type_item = self.type_items.items[@intFromEnum(ty)]; + const type_extra: struct { len: u64, child: Type } = switch (type_item.tag) { + .small_array => extra: { + const extra = self.typeExtraData(Type.Vector, type_item.data); + break :extra .{ .len = extra.len, .child = extra.child }; + }, + .array => extra: { + const extra = self.typeExtraData(Type.Array, type_item.data); + break :extra .{ .len = extra.len(), .child = extra.child }; + }, + else => unreachable, + }; + assert(type_extra.len == vals.len); + for (vals) |val| assert(type_extra.child == val.typeOf(self)); + + for (vals) |val| { + if (!val.isZeroInit(self)) break; + } else return self.zeroInitConstAssumeCapacity(ty); + + const result = self.getOrPutConstantAggregateAssumeCapacity(.array, ty, vals); + if (self.useLibLlvm() and result.new) { + const ExpectedContents = [expected_fields_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_vals = try allocator.alloc(*llvm.Value, vals.len); + defer allocator.free(llvm_vals); + for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self); + + self.llvm_constants.appendAssumeCapacity( + type_extra.child.toLlvm(self).constArray(llvm_vals.ptr, @intCast(llvm_vals.len)), + ); + } + return result.constant; +} + +fn stringConstAssumeCapacity(self: *Builder, val: String) Constant { + const slice = val.toSlice(self).?; + const ty = self.arrayTypeAssumeCapacity(slice.len, .i8); + if (std.mem.allEqual(u8, slice, 0)) return self.zeroInitConstAssumeCapacity(ty); + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .string, .data = @intFromEnum(val) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + self.llvm_context.constString(slice.ptr, @intCast(slice.len), .True), + ); + return result.constant; +} + +fn stringNullConstAssumeCapacity(self: *Builder, val: String) Constant { + const slice = val.toSlice(self).?; + const ty = self.arrayTypeAssumeCapacity(slice.len + 1, .i8); + if (std.mem.allEqual(u8, slice, 0)) return self.zeroInitConstAssumeCapacity(ty); + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .string_null, .data = @intFromEnum(val) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + self.llvm_context.constString(slice.ptr, @intCast(slice.len + 1), .True), + ); + return result.constant; +} + +fn vectorConstAssumeCapacity( + self: *Builder, + ty: Type, + vals: []const Constant, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + if (std.debug.runtime_safety) { + const type_item = self.type_items.items[@intFromEnum(ty)]; + assert(type_item.tag == .vector); + const extra = self.typeExtraData(Type.Vector, type_item.data); + assert(extra.len == vals.len); + for (vals) |val| assert(extra.child == val.typeOf(self)); + } + + for (vals) |val| { + if (!val.isZeroInit(self)) break; + } else return self.zeroInitConstAssumeCapacity(ty); + + const result = 
self.getOrPutConstantAggregateAssumeCapacity(.vector, ty, vals); + if (self.useLibLlvm() and result.new) { + const ExpectedContents = [expected_fields_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_vals = try allocator.alloc(*llvm.Value, vals.len); + defer allocator.free(llvm_vals); + for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self); + + self.llvm_constants.appendAssumeCapacity( + llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)), + ); + } + return result.constant; +} + +fn zeroInitConstAssumeCapacity(self: *Builder, ty: Type) Constant { + switch (self.type_items.items[@intFromEnum(ty)].tag) { + .simple, + .function, + .vararg_function, + .integer, + .pointer, + => unreachable, + .target, + .vector, + .scalable_vector, + .small_array, + .array, + .structure, + .packed_structure, + .named_structure, + => {}, + } + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .zeroinitializer, .data = @intFromEnum(ty) }, + ); + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + return result.constant; +} + +fn undefConstAssumeCapacity(self: *Builder, ty: Type) Constant { + switch (self.type_items.items[@intFromEnum(ty)].tag) { + .simple => switch (ty) { + .void, .label => unreachable, + else => {}, + }, + .function, .vararg_function => unreachable, + else => {}, + } + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .undef, .data = @intFromEnum(ty) }, + ); + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); + return result.constant; +} + +fn poisonConstAssumeCapacity(self: *Builder, ty: Type) Constant { + switch (self.type_items.items[@intFromEnum(ty)].tag) { + .simple => switch (ty) { + .void, .label => unreachable, + else => {}, + }, + .function, .vararg_function => unreachable, + else => {}, + } + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .poison, .data = @intFromEnum(ty) }, + ); + if (self.useLibLlvm() and result.new) + self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); + return result.constant; +} + +fn blockAddrConstAssumeCapacity( + self: *Builder, + function: Function.Index, + block: Function.Block.Index, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.BlockAddress) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.blockaddress)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.BlockAddress, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .blockaddress) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.BlockAddress, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.BlockAddress{ .function = function, .block = block }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .blockaddress, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity( + 
function.toLlvm(self).blockAddress(block.toValue(self, function).toLlvm(self, function)), + ); + } + return @enumFromInt(gop.index); +} + +fn dsoLocalEquivalentConstAssumeCapacity(self: *Builder, function: Function.Index) Constant { + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .dso_local_equivalent, .data = @intFromEnum(function) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity(undefined); + return result.constant; +} + +fn noCfiConstAssumeCapacity(self: *Builder, function: Function.Index) Constant { + const result = self.getOrPutConstantNoExtraAssumeCapacity( + .{ .tag = .no_cfi, .data = @intFromEnum(function) }, + ); + if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity(undefined); + return result.constant; +} + +fn convConstAssumeCapacity( + self: *Builder, + signedness: Constant.Cast.Signedness, + arg: Constant, + ty: Type, +) Constant { + const arg_ty = arg.typeOf(self); + if (arg_ty == ty) return arg; + return self.castConstAssumeCapacity(switch (arg_ty.scalarTag(self)) { + .simple => switch (ty.scalarTag(self)) { + .simple => switch (std.math.order(arg_ty.scalarBits(self), ty.scalarBits(self))) { + .lt => .fpext, + .eq => unreachable, + .gt => .fptrunc, + }, + .integer => switch (signedness) { + .unsigned => .fptoui, + .signed => .fptosi, + .unneeded => unreachable, + }, + else => unreachable, + }, + .integer => switch (ty.tag(self)) { + .simple => switch (signedness) { + .unsigned => .uitofp, + .signed => .sitofp, + .unneeded => unreachable, + }, + .integer => switch (std.math.order(arg_ty.scalarBits(self), ty.scalarBits(self))) { + .lt => switch (signedness) { + .unsigned => .zext, + .signed => .sext, + .unneeded => unreachable, + }, + .eq => unreachable, + .gt => .trunc, + }, + .pointer => .inttoptr, + else => unreachable, + }, + .pointer => switch (ty.tag(self)) { + .integer => .ptrtoint, + .pointer => .addrspacecast, + else => unreachable, + }, + else => unreachable, + }, arg, ty); +} + +fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, arg: Constant, ty: Type) Constant { + const Key = struct { tag: Constant.Tag, cast: Constant.Cast }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(key.tag)), + std.mem.asBytes(&key.cast), + )); + } + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Cast, rhs_data); + return std.meta.eql(lhs_key.cast, rhs_extra); + } + }; + const data = Key{ .tag = tag, .cast = .{ .arg = arg, .type = ty } }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = tag, + .data = self.addConstantExtraAssumeCapacity(data.cast), + }); + if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity(switch (tag) { + .trunc => &llvm.Value.constTrunc, + .zext => &llvm.Value.constZExt, + .sext => &llvm.Value.constSExt, + .fptrunc => &llvm.Value.constFPTrunc, + .fpext => &llvm.Value.constFPExt, + .fptoui => &llvm.Value.constFPToUI, + .fptosi => &llvm.Value.constFPToSI, + .uitofp => &llvm.Value.constUIToFP, + .sitofp => &llvm.Value.constSIToFP, 
+ .ptrtoint => &llvm.Value.constPtrToInt, + .inttoptr => &llvm.Value.constIntToPtr, + .bitcast => &llvm.Value.constBitCast, + else => unreachable, + }(arg.toLlvm(self), ty.toLlvm(self))); + } + return @enumFromInt(gop.index); +} + +fn gepConstAssumeCapacity( + self: *Builder, + comptime kind: Constant.GetElementPtr.Kind, + ty: Type, + base: Constant, + indices: []const Constant, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + const tag: Constant.Tag = switch (kind) { + .normal => .getelementptr, + .inbounds => .@"getelementptr inbounds", + }; + const base_ty = base.typeOf(self); + const base_is_vector = base_ty.isVector(self); + + const VectorInfo = struct { + kind: Type.Vector.Kind, + len: u32, + + fn init(vector_ty: Type, builder: *const Builder) @This() { + return .{ .kind = vector_ty.vectorKind(builder), .len = vector_ty.vectorLen(builder) }; + } + }; + var vector_info: ?VectorInfo = if (base_is_vector) VectorInfo.init(base_ty, self) else null; + for (indices) |index| { + const index_ty = index.typeOf(self); + switch (index_ty.tag(self)) { + .integer => {}, + .vector, .scalable_vector => { + const index_info = VectorInfo.init(index_ty, self); + if (vector_info) |info| + assert(std.meta.eql(info, index_info)) + else + vector_info = index_info; + }, + else => unreachable, + } + } + if (!base_is_vector) if (vector_info) |info| switch (info.kind) { + inline else => |vector_kind| _ = self.vectorTypeAssumeCapacity(vector_kind, info.len, base_ty), + }; + + const Key = struct { type: Type, base: Constant, indices: []const Constant }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag))); + hasher.update(std.mem.asBytes(&key.type)); + hasher.update(std.mem.asBytes(&key.base)); + hasher.update(std.mem.sliceAsBytes(key.indices)); + return @truncate(hasher.final()); + } + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != tag) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraDataTrail(Constant.GetElementPtr, rhs_data); + const rhs_indices: []const Constant = @ptrCast(ctx.builder.constant_extra + .items[rhs_extra.end..][0..rhs_extra.data.indices_len]); + return lhs_key.type == rhs_extra.data.type and lhs_key.base == rhs_extra.data.base and + std.mem.eql(Constant, lhs_key.indices, rhs_indices); + } + }; + const data = Key{ .type = ty, .base = base, .indices = indices }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = tag, + .data = self.addConstantExtraAssumeCapacity(Constant.GetElementPtr{ + .type = ty, + .base = base, + .indices_len = @intCast(indices.len), + }), + }); + self.constant_extra.appendSliceAssumeCapacity(@ptrCast(indices)); + if (self.useLibLlvm()) { + const ExpectedContents = [expected_gep_indices_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_indices = try allocator.alloc(*llvm.Value, indices.len); + defer allocator.free(llvm_indices); + for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self); + + self.llvm_constants.appendAssumeCapacity(switch 
(kind) { + .normal => &llvm.Type.constGEP, + .inbounds => &llvm.Type.constInBoundsGEP, + }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(indices.len))); + } + } + return @enumFromInt(gop.index); +} + +fn binConstAssumeCapacity( + self: *Builder, + tag: Constant.Tag, + lhs: Constant, + rhs: Constant, +) Constant { + switch (tag) { + .add, .sub, .mul, .shl, .lshr, .ashr, .@"and", .@"or", .xor => {}, + else => unreachable, + } + const Key = struct { tag: Constant.Tag, bin: Constant.Binary }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(key.tag)), + std.mem.asBytes(&key.bin), + )); + } + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Binary, rhs_data); + return std.meta.eql(lhs_key.bin, rhs_extra); + } + }; + const data = Key{ .tag = tag, .bin = .{ .lhs = lhs, .rhs = rhs } }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = tag, + .data = self.addConstantExtraAssumeCapacity(data.bin), + }); + if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity(switch (tag) { + .add => &llvm.Value.constAdd, + .sub => &llvm.Value.constSub, + .mul => &llvm.Value.constMul, + .shl => &llvm.Value.constShl, + .lshr => &llvm.Value.constLShr, + .ashr => &llvm.Value.constAShr, + .@"and" => &llvm.Value.constAnd, + .@"or" => &llvm.Value.constOr, + .xor => &llvm.Value.constXor, + else => unreachable, + }(lhs.toLlvm(self), rhs.toLlvm(self))); + } + return @enumFromInt(gop.index); +} + +fn ensureUnusedConstantCapacity( + self: *Builder, + count: usize, + comptime Extra: ?type, + trail_len: usize, +) Allocator.Error!void { + try self.constant_map.ensureUnusedCapacity(self.gpa, count); + try self.constant_items.ensureUnusedCapacity(self.gpa, count); + if (Extra) |E| try self.constant_extra.ensureUnusedCapacity( + self.gpa, + count * (@typeInfo(E).Struct.fields.len + trail_len), + ) else assert(trail_len == 0); + if (self.useLibLlvm()) try self.llvm_constants.ensureUnusedCapacity(self.gpa, count); +} + +fn getOrPutConstantNoExtraAssumeCapacity( + self: *Builder, + item: Constant.Item, +) struct { new: bool, constant: Constant } { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.Item) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(key.tag)), + std.mem.asBytes(&key.data), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.Item, _: void, rhs_index: usize) bool { + return std.meta.eql(lhs_key, ctx.builder.constant_items.get(rhs_index)); + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(item, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(item); + } + return .{ .new = !gop.found_existing, .constant = @enumFromInt(gop.index) }; +} + +fn getOrPutConstantAggregateAssumeCapacity( + self: *Builder, + tag: Constant.Tag, + ty: Type, + vals: []const Constant, +) struct { new: bool, constant: Constant } { + switch (tag) { + .structure, .packed_structure, .array, .vector => {}, + else 
=> unreachable, + } + const Key = struct { tag: Constant.Tag, type: Type, vals: []const Constant }; + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Key) u32 { + var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag))); + hasher.update(std.mem.asBytes(&key.type)); + hasher.update(std.mem.sliceAsBytes(key.vals)); + return @truncate(hasher.final()); + } + pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraDataTrail(Constant.Aggregate, rhs_data); + if (lhs_key.type != rhs_extra.data.type) return false; + const rhs_vals: []const Constant = + @ptrCast(ctx.builder.constant_extra.items[rhs_extra.end..][0..lhs_key.vals.len]); + return std.mem.eql(Constant, lhs_key.vals, rhs_vals); + } + }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted( + Key{ .tag = tag, .type = ty, .vals = vals }, + Adapter{ .builder = self }, + ); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = tag, + .data = self.addConstantExtraAssumeCapacity(Constant.Aggregate{ .type = ty }), + }); + self.constant_extra.appendSliceAssumeCapacity(@ptrCast(vals)); + } + return .{ .new = !gop.found_existing, .constant = @enumFromInt(gop.index) }; +} + +fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item.ExtraIndex { + const result: Constant.Item.ExtraIndex = @intCast(self.constant_extra.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + const value = @field(extra, field.name); + self.constant_extra.appendAssumeCapacity(switch (field.type) { + u32 => value, + Type, + Constant, + Function.Index, + Function.Block.Index, + => @intFromEnum(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }); + } + return result; +} + +fn constantExtraDataTrail( + self: *const Builder, + comptime T: type, + index: Constant.Item.ExtraIndex, +) struct { data: T, end: Constant.Item.ExtraIndex } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, data| + @field(result, field.name) = switch (field.type) { + u32 => data, + Type, + Constant, + Function.Index, + Function.Block.Index, + => @enumFromInt(data), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + return .{ .data = result, .end = index + @as(Constant.Item.ExtraIndex, @intCast(fields.len)) }; +} + +fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Item.ExtraIndex) T { + return self.constantExtraDataTrail(T, index).data; +} + inline fn useLibLlvm(self: *const Builder) bool { return build_options.have_llvm and self.use_lib_llvm; } diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index e5fa8ba265..080184f488 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -168,23 +168,41 @@ pub const Value = opaque { pub const setAliasee = LLVMAliasSetAliasee; extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void; - pub const constBitCast = LLVMConstBitCast; - extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value; + pub const constTrunc = LLVMConstTrunc; + extern fn LLVMConstTrunc(ConstantVal: *Value, ToType: *Type) *Value; - pub const 
constIntToPtr = LLVMConstIntToPtr; - extern fn LLVMConstIntToPtr(ConstantVal: *Value, ToType: *Type) *Value; + pub const constSExt = LLVMConstSExt; + extern fn LLVMConstSExt(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constZExt = LLVMConstZExt; + extern fn LLVMConstZExt(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constFPTrunc = LLVMConstFPTrunc; + extern fn LLVMConstFPTrunc(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constFPExt = LLVMConstFPExt; + extern fn LLVMConstFPExt(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constUIToFP = LLVMConstUIToFP; + extern fn LLVMConstUIToFP(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constSIToFP = LLVMConstSIToFP; + extern fn LLVMConstSIToFP(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constFPToUI = LLVMConstFPToUI; + extern fn LLVMConstFPToUI(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constFPToSI = LLVMConstFPToSI; + extern fn LLVMConstFPToSI(ConstantVal: *Value, ToType: *Type) *Value; pub const constPtrToInt = LLVMConstPtrToInt; extern fn LLVMConstPtrToInt(ConstantVal: *Value, ToType: *Type) *Value; - pub const constShl = LLVMConstShl; - extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value; - - pub const constOr = LLVMConstOr; - extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constIntToPtr = LLVMConstIntToPtr; + extern fn LLVMConstIntToPtr(ConstantVal: *Value, ToType: *Type) *Value; - pub const constZExt = LLVMConstZExt; - extern fn LLVMConstZExt(ConstantVal: *Value, ToType: *Type) *Value; + pub const constBitCast = LLVMConstBitCast; + extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value; pub const constZExtOrBitCast = LLVMConstZExtOrBitCast; extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value; @@ -195,6 +213,30 @@ pub const Value = opaque { pub const constAdd = LLVMConstAdd; extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constSub = LLVMConstSub; + extern fn LLVMConstSub(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constMul = LLVMConstMul; + extern fn LLVMConstMul(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constAnd = LLVMConstAnd; + extern fn LLVMConstAnd(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constOr = LLVMConstOr; + extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constXor = LLVMConstXor; + extern fn LLVMConstXor(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constShl = LLVMConstShl; + extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constLShr = LLVMConstLShr; + extern fn LLVMConstLShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constAShr = LLVMConstAShr; + extern fn LLVMConstAShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constAddrSpaceCast = LLVMConstAddrSpaceCast; extern fn LLVMConstAddrSpaceCast(ConstantVal: *Value, ToType: *Type) *Value; @@ -281,6 +323,9 @@ pub const Value = opaque { pub const attachMetaData = ZigLLVMAttachMetaData; extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void; + pub const blockAddress = LLVMBlockAddress; + extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value; + pub const dump = LLVMDumpValue; extern fn LLVMDumpValue(Val: *Value) void; }; @@ -349,6 +394,14 @@ pub const Type = opaque { pub const isSized = LLVMTypeIsSized; extern fn LLVMTypeIsSized(Ty: *Type) 
Bool; + pub const constGEP = LLVMConstGEP2; + extern fn LLVMConstGEP2( + Ty: *Type, + ConstantVal: *Value, + ConstantIndices: [*]const *Value, + NumIndices: c_uint, + ) *Value; + pub const constInBoundsGEP = LLVMConstInBoundsGEP2; extern fn LLVMConstInBoundsGEP2( Ty: *Type, -- cgit v1.2.3 From 7ec7fe53597a70a3f2c9f040d7da978ae3a52a6b Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 10 Jul 2023 20:30:15 -0400 Subject: llvm: cleanup init --- src/codegen/llvm.zig | 286 +++++++++++++++++++------------------------ src/codegen/llvm/Builder.zig | 196 +++++++++++++++++++---------- 2 files changed, 259 insertions(+), 223 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0ac30e00e7..c34e241fa4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -35,7 +35,7 @@ const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; const Error = error{ OutOfMemory, CodegenFail }; -pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { +pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 { var llvm_triple = std.ArrayList(u8).init(allocator); defer llvm_triple.deinit(); @@ -208,7 +208,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { }; try llvm_triple.appendSlice(llvm_abi); - return llvm_triple.toOwnedSliceSentinel(0); + return llvm_triple.toOwnedSlice(); } pub fn targetOs(os_tag: std.Target.Os.Tag) llvm.OSType { @@ -602,160 +602,137 @@ pub const Object = struct { } pub fn init(gpa: Allocator, options: link.Options) !Object { - var builder = Builder{ - .gpa = gpa, - .use_lib_llvm = options.use_lib_llvm, - - .llvm_context = llvm.Context.create(), - .llvm_module = undefined, - }; - errdefer builder.llvm_context.dispose(); - - builder.initializeLLVMTarget(options.target.cpu.arch); - - builder.llvm_module = llvm.Module.createWithName(options.root_name.ptr, builder.llvm_context); - errdefer builder.llvm_module.dispose(); - const llvm_target_triple = try targetTriple(gpa, options.target); defer gpa.free(llvm_target_triple); - var error_message: [*:0]const u8 = undefined; - var target: *llvm.Target = undefined; - if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) { - defer llvm.disposeMessage(error_message); - - log.err("LLVM failed to parse '{s}': {s}", .{ llvm_target_triple, error_message }); - return error.InvalidLlvmTriple; - } - - builder.llvm_module.setTarget(llvm_target_triple.ptr); - var opt_di_builder: ?*llvm.DIBuilder = null; - errdefer if (opt_di_builder) |di_builder| di_builder.dispose(); + var builder = try Builder.init(.{ + .allocator = gpa, + .use_lib_llvm = options.use_lib_llvm, + .name = options.root_name, + .target = options.target, + .triple = llvm_target_triple, + }); + errdefer builder.deinit(); - var di_compile_unit: ?*llvm.DICompileUnit = null; + var target_machine: *llvm.TargetMachine = undefined; + var target_data: *llvm.TargetData = undefined; + if (builder.useLibLlvm()) { + if (!options.strip) { + switch (options.target.ofmt) { + .coff => builder.llvm.module.?.addModuleCodeViewFlag(), + else => builder.llvm.module.?.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), + } + builder.llvm.di_builder = builder.llvm.module.?.createDIBuilder(true); + + // Don't use the version string here; LLVM misparses it when it + // includes the git revision. 
+ const producer = try builder.fmt("zig {d}.{d}.{d}", .{ + build_options.semver.major, + build_options.semver.minor, + build_options.semver.patch, + }); - if (!options.strip) { - switch (options.target.ofmt) { - .coff => builder.llvm_module.addModuleCodeViewFlag(), - else => builder.llvm_module.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), - } - const di_builder = builder.llvm_module.createDIBuilder(true); - opt_di_builder = di_builder; - - // Don't use the version string here; LLVM misparses it when it - // includes the git revision. - const producer = try std.fmt.allocPrintZ(gpa, "zig {d}.{d}.{d}", .{ - build_options.semver.major, - build_options.semver.minor, - build_options.semver.patch, - }); - defer gpa.free(producer); - - // We fully resolve all paths at this point to avoid lack of source line info in stack - // traces or lack of debugging information which, if relative paths were used, would - // be very location dependent. - // TODO: the only concern I have with this is WASI as either host or target, should - // we leave the paths as relative then? - var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; - const compile_unit_dir = blk: { - const path = d: { - const mod = options.module orelse break :d "."; - break :d mod.root_pkg.root_src_directory.path orelse "."; + // We fully resolve all paths at this point to avoid lack of source line info in stack + // traces or lack of debugging information which, if relative paths were used, would + // be very location dependent. + // TODO: the only concern I have with this is WASI as either host or target, should + // we leave the paths as relative then? + var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const compile_unit_dir = blk: { + const path = d: { + const mod = options.module orelse break :d "."; + break :d mod.root_pkg.root_src_directory.path orelse "."; + }; + if (std.fs.path.isAbsolute(path)) break :blk path; + break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was }; - if (std.fs.path.isAbsolute(path)) break :blk path; - break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was - }; - const compile_unit_dir_z = try gpa.dupeZ(u8, compile_unit_dir); - defer gpa.free(compile_unit_dir_z); - - di_compile_unit = di_builder.createCompileUnit( - DW.LANG.C99, - di_builder.createFile(options.root_name, compile_unit_dir_z), - producer, - options.optimize_mode != .Debug, - "", // flags - 0, // runtime version - "", // split name - 0, // dwo id - true, // emit debug info - ); - } - - const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) - .None - else - .Aggressive; + const compile_unit_dir_z = try builder.gpa.dupeZ(u8, compile_unit_dir); + defer builder.gpa.free(compile_unit_dir_z); + + builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit( + DW.LANG.C99, + builder.llvm.di_builder.?.createFile(options.root_name, compile_unit_dir_z), + producer.toSlice(&builder).?, + options.optimize_mode != .Debug, + "", // flags + 0, // runtime version + "", // split name + 0, // dwo id + true, // emit debug info + ); + } - const reloc_mode: llvm.RelocMode = if (options.pic) - .PIC - else if (options.link_mode == .Dynamic) - llvm.RelocMode.DynamicNoPIC - else - .Static; - - const code_model: llvm.CodeModel = switch (options.machine_code_model) { - .default => .Default, - .tiny => .Tiny, - .small => .Small, - .kernel => .Kernel, - .medium => .Medium, - .large => .Large, - }; + const opt_level: 
llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) + .None + else + .Aggressive; - // TODO handle float ABI better- it should depend on the ABI portion of std.Target - const float_abi: llvm.ABIType = .Default; - - const target_machine = llvm.TargetMachine.create( - target, - llvm_target_triple.ptr, - if (options.target.cpu.model.llvm_name) |s| s.ptr else null, - options.llvm_cpu_features, - opt_level, - reloc_mode, - code_model, - options.function_sections, - float_abi, - if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null, - ); - errdefer target_machine.dispose(); + const reloc_mode: llvm.RelocMode = if (options.pic) + .PIC + else if (options.link_mode == .Dynamic) + llvm.RelocMode.DynamicNoPIC + else + .Static; + + const code_model: llvm.CodeModel = switch (options.machine_code_model) { + .default => .Default, + .tiny => .Tiny, + .small => .Small, + .kernel => .Kernel, + .medium => .Medium, + .large => .Large, + }; - const target_data = target_machine.createTargetDataLayout(); - errdefer target_data.dispose(); + // TODO handle float ABI better- it should depend on the ABI portion of std.Target + const float_abi: llvm.ABIType = .Default; + + target_machine = llvm.TargetMachine.create( + builder.llvm.target.?, + builder.target_triple.toSlice(&builder).?.ptr, + if (options.target.cpu.model.llvm_name) |s| s.ptr else null, + options.llvm_cpu_features, + opt_level, + reloc_mode, + code_model, + options.function_sections, + float_abi, + if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null, + ); + errdefer target_machine.dispose(); - builder.llvm_module.setModuleDataLayout(target_data); + target_data = target_machine.createTargetDataLayout(); + errdefer target_data.dispose(); - if (options.pic) builder.llvm_module.setModulePICLevel(); - if (options.pie) builder.llvm_module.setModulePIELevel(); - if (code_model != .Default) builder.llvm_module.setModuleCodeModel(code_model); + builder.llvm.module.?.setModuleDataLayout(target_data); - if (options.opt_bisect_limit >= 0) { - builder.llvm_context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit)); - } + if (options.pic) builder.llvm.module.?.setModulePICLevel(); + if (options.pie) builder.llvm.module.?.setModulePIELevel(); + if (code_model != .Default) builder.llvm.module.?.setModuleCodeModel(code_model); - try builder.init(); - errdefer builder.deinit(); - builder.source_filename = try builder.string(options.root_name); - builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }}); - builder.target_triple = try builder.string(llvm_target_triple); + if (options.opt_bisect_limit >= 0) { + builder.llvm.context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit)); + } - if (std.debug.runtime_safety) { - const rep = target_data.stringRep(); - defer llvm.disposeMessage(rep); - std.testing.expectEqualStrings( - std.mem.span(rep), - builder.data_layout.toSlice(&builder).?, - ) catch unreachable; + builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }}); + if (std.debug.runtime_safety) { + const rep = target_data.stringRep(); + defer llvm.disposeMessage(rep); + std.testing.expectEqualStrings( + std.mem.span(rep), + builder.data_layout.toSlice(&builder).?, + ) catch unreachable; + } } - return Object{ + return .{ .gpa = gpa, .builder = builder, .module = options.module.?, - .llvm_module = builder.llvm_module, + .llvm_module = builder.llvm.module.?, .di_map = .{}, - .di_builder = opt_di_builder, - 
.di_compile_unit = di_compile_unit, - .context = builder.llvm_context, + .di_builder = builder.llvm.di_builder, + .di_compile_unit = builder.llvm.di_compile_unit, + .context = builder.llvm.context, .target_machine = target_machine, .target_data = target_data, .target = options.target, @@ -770,15 +747,10 @@ pub const Object = struct { } pub fn deinit(self: *Object, gpa: Allocator) void { - if (self.di_builder) |dib| { - dib.dispose(); - self.di_map.deinit(gpa); - self.di_type_map.deinit(gpa); - } + self.di_map.deinit(gpa); + self.di_type_map.deinit(gpa); self.target_data.dispose(); self.target_machine.dispose(); - self.llvm_module.dispose(); - self.context.dispose(); self.decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); @@ -845,7 +817,7 @@ pub const Object = struct { .mutability = .constant, .init = str_init, }; - try o.builder.llvm_globals.append(o.gpa, str_global); + try o.builder.llvm.globals.append(o.gpa, str_global); const str_global_index = try o.builder.addGlobal(.none, global); try o.builder.variables.append(o.gpa, variable); @@ -875,7 +847,7 @@ pub const Object = struct { .mutability = .constant, .init = error_name_table_init, }; - try o.builder.llvm_globals.append(o.gpa, error_name_table_global); + try o.builder.llvm.globals.append(o.gpa, error_name_table_global); _ = try o.builder.addGlobal(.none, global); try o.builder.variables.append(o.gpa, variable); @@ -941,7 +913,7 @@ pub const Object = struct { llvm_global.replaceAllUsesWith(other_llvm_global); deleteLlvmGlobal(llvm_global); - object.builder.llvm_globals.items[@intFromEnum(global)] = other_llvm_global; + object.builder.llvm.globals.items[@intFromEnum(global)] = other_llvm_global; } object.extern_collisions.clearRetainingCapacity(); @@ -960,7 +932,7 @@ pub const Object = struct { other_llvm_global.replaceAllUsesWith(llvm_global); try global.takeName(&object.builder, other_global); deleteLlvmGlobal(other_llvm_global); - object.builder.llvm_globals.items[@intFromEnum(other_global)] = llvm_global; + object.builder.llvm.globals.items[@intFromEnum(other_global)] = llvm_global; // Problem: now we need to replace in the decl_map that // the extern decl index points to this new global. However we don't // know the decl index. @@ -2765,7 +2737,7 @@ pub const Object = struct { .global = @enumFromInt(o.builder.globals.count()), .init = llvm_init, }; - try o.builder.llvm_globals.append(o.gpa, llvm_global); + try o.builder.llvm.globals.append(o.gpa, llvm_global); _ = try o.builder.addGlobal(.none, global); try o.builder.variables.append(o.gpa, variable); @@ -2908,7 +2880,7 @@ pub const Object = struct { }; } - try o.builder.llvm_globals.append(o.gpa, llvm_fn); + try o.builder.llvm.globals.append(o.gpa, llvm_fn); gop.value_ptr.* = try o.builder.addGlobal(fqn, global); try o.builder.functions.append(o.gpa, function); return global.kind.function; @@ -3017,7 +2989,7 @@ pub const Object = struct { llvm_global.setUnnamedAddr(.True); } - try o.builder.llvm_globals.append(o.gpa, llvm_global); + try o.builder.llvm.globals.append(o.gpa, llvm_global); gop.value_ptr.* = try o.builder.addGlobal(name, global); try o.builder.variables.append(o.gpa, variable); return global.kind.variable; @@ -4553,7 +4525,7 @@ pub const DeclGen = struct { // TODO: How should this work then the address space of a global changed? 
llvm_global.replaceAllUsesWith(new_global); new_global.takeName(llvm_global); - o.builder.llvm_globals.items[@intFromEnum(object.ptrConst(&o.builder).global)] = + o.builder.llvm.globals.items[@intFromEnum(object.ptrConst(&o.builder).global)] = new_global; llvm_global.deleteGlobal(); llvm_global = new_global; @@ -4699,7 +4671,7 @@ pub const FuncGen = struct { .mutability = .constant, .init = llvm_val, }; - try o.builder.llvm_globals.append(o.gpa, llvm_global); + try o.builder.llvm.globals.append(o.gpa, llvm_global); _ = try o.builder.addGlobal(.none, global); try o.builder.variables.append(o.gpa, variable); @@ -7855,7 +7827,7 @@ pub const FuncGen = struct { .global = @enumFromInt(o.builder.globals.count()), }; - try o.builder.llvm_globals.append(self.gpa, f); + try o.builder.llvm.globals.append(self.gpa, f); _ = try o.builder.addGlobal(fn_name, global); try o.builder.functions.append(self.gpa, function); break :b f; @@ -9372,7 +9344,7 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(unnamed_block); _ = self.builder.buildRet(Builder.Constant.false.toLlvm(&o.builder)); - try o.builder.llvm_globals.append(self.gpa, fn_val); + try o.builder.llvm.globals.append(self.gpa, fn_val); _ = try o.builder.addGlobal(llvm_fn_name, global); try o.builder.functions.append(self.gpa, function); gop.value_ptr.* = global.kind.function; @@ -9484,7 +9456,7 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(bad_value_block); _ = self.builder.buildUnreachable(); - try o.builder.llvm_globals.append(self.gpa, fn_val); + try o.builder.llvm.globals.append(self.gpa, fn_val); gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); try o.builder.functions.append(self.gpa, function); return fn_val; @@ -9515,7 +9487,7 @@ pub const FuncGen = struct { .global = @enumFromInt(o.builder.globals.count()), }; - try o.builder.llvm_globals.append(self.gpa, llvm_fn); + try o.builder.llvm.globals.append(self.gpa, llvm_fn); _ = try o.builder.addGlobal(try o.builder.string(lt_errors_fn_name), global); try o.builder.functions.append(self.gpa, function); return llvm_fn; @@ -10156,7 +10128,7 @@ pub const FuncGen = struct { .mutability = .constant, .init = undef_init, }; - try o.builder.llvm_globals.append(o.gpa, error_name_table_global); + try o.builder.llvm.globals.append(o.gpa, error_name_table_global); _ = try o.builder.addGlobal(name, global); try o.builder.variables.append(o.gpa, variable); diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 1381e5d9d0..22df52753a 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1,12 +1,16 @@ gpa: Allocator, use_lib_llvm: bool, -llvm_context: *llvm.Context, -llvm_module: *llvm.Module, -di_builder: ?*llvm.DIBuilder = null, -llvm_types: std.ArrayListUnmanaged(*llvm.Type) = .{}, -llvm_globals: std.ArrayListUnmanaged(*llvm.Value) = .{}, -llvm_constants: std.ArrayListUnmanaged(*llvm.Value) = .{}, +llvm: if (build_options.have_llvm) struct { + context: *llvm.Context, + module: ?*llvm.Module = null, + target: ?*llvm.Target = null, + di_builder: ?*llvm.DIBuilder = null, + di_compile_unit: ?*llvm.DICompileUnit = null, + types: std.ArrayListUnmanaged(*llvm.Type) = .{}, + globals: std.ArrayListUnmanaged(*llvm.Value) = .{}, + constants: std.ArrayListUnmanaged(*llvm.Value) = .{}, +} else void, source_filename: String = .none, data_layout: String = .none, @@ -38,6 +42,14 @@ constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb) = .{}, pub const expected_fields_len = 32; pub const expected_gep_indices_len = 8; +pub 
const Options = struct { + allocator: Allocator, + use_lib_llvm: bool = false, + name: []const u8 = &.{}, + target: std.Target = builtin.target, + triple: []const u8 = &.{}, +}; + pub const String = enum(u32) { none = std.math.maxInt(u31), empty, @@ -481,7 +493,7 @@ pub const Type = enum(u32) { pub fn toLlvm(self: Type, builder: *const Builder) *llvm.Type { assert(builder.useLibLlvm()); - return builder.llvm_types.items[@intFromEnum(self)]; + return builder.llvm.types.items[@intFromEnum(self)]; } }; @@ -751,7 +763,7 @@ pub const Global = struct { pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); - return builder.llvm_globals.items[@intFromEnum(self)]; + return builder.llvm.globals.items[@intFromEnum(self)]; } const FormatData = struct { @@ -780,9 +792,9 @@ pub const Global = struct { pub fn renameAssumeCapacity(self: Index, builder: *Builder, name: String) void { const index = @intFromEnum(self); if (builder.globals.keys()[index] == name) return; - if (builder.useLibLlvm()) builder.llvm_globals.appendAssumeCapacity(builder.llvm_globals.items[index]); + if (builder.useLibLlvm()) builder.llvm.globals.appendAssumeCapacity(builder.llvm.globals.items[index]); _ = builder.addGlobalAssumeCapacity(name, builder.globals.values()[index]); - if (builder.useLibLlvm()) _ = builder.llvm_globals.pop(); + if (builder.useLibLlvm()) _ = builder.llvm.globals.pop(); builder.globals.swapRemoveAt(index); self.updateName(builder); } @@ -802,7 +814,7 @@ pub const Global = struct { if (!builder.useLibLlvm()) return; const index = @intFromEnum(self); const slice = builder.globals.keys()[index].toSlice(builder) orelse ""; - builder.llvm_globals.items[index].setValueName2(slice.ptr, slice.len); + builder.llvm.globals.items[index].setValueName2(slice.ptr, slice.len); } }; @@ -1414,7 +1426,7 @@ pub const Constant = enum(u32) { pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); return switch (self.unwrap()) { - .constant => |constant| builder.llvm_constants.items[constant], + .constant => |constant| builder.llvm.constants.items[constant], .global => |global| global.toLlvm(builder), }; } @@ -1436,26 +1448,70 @@ pub const Value = enum(u32) { } }; -pub fn init(self: *Builder) Allocator.Error!void { +pub const InitError = error{ + InvalidLlvmTriple, +} || Allocator.Error; + +pub fn init(options: Options) InitError!Builder { + var self = Builder{ + .gpa = options.allocator, + .use_lib_llvm = options.use_lib_llvm, + .llvm = undefined, + }; + if (self.useLibLlvm()) self.llvm = .{ .context = llvm.Context.create() }; + errdefer self.deinit(); + try self.string_indices.append(self.gpa, 0); assert(try self.string("") == .empty); + if (options.name.len > 0) self.source_filename = try self.string(options.name); + self.initializeLLVMTarget(options.target.cpu.arch); + if (self.useLibLlvm()) self.llvm.module = llvm.Module.createWithName( + (self.source_filename.toSlice(&self) orelse "").ptr, + self.llvm.context, + ); + + if (options.triple.len > 0) { + self.target_triple = try self.string(options.triple); + + if (self.useLibLlvm()) { + var error_message: [*:0]const u8 = undefined; + var target: *llvm.Target = undefined; + if (llvm.Target.getFromTriple( + self.target_triple.toSlice(&self).?.ptr, + &target, + &error_message, + ).toBool()) { + defer llvm.disposeMessage(error_message); + + log.err("LLVM failed to parse '{s}': {s}", .{ + self.target_triple.toSlice(&self).?, + error_message, + }); + return InitError.InvalidLlvmTriple; + } + 
self.llvm.target = target; + self.llvm.module.?.setTarget(self.target_triple.toSlice(&self).?.ptr); + } + } + { const static_len = @typeInfo(Type).Enum.fields.len - 1; try self.type_map.ensureTotalCapacity(self.gpa, static_len); try self.type_items.ensureTotalCapacity(self.gpa, static_len); - if (self.useLibLlvm()) try self.llvm_types.ensureTotalCapacity(self.gpa, static_len); + if (self.useLibLlvm()) try self.llvm.types.ensureTotalCapacity(self.gpa, static_len); inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| { const result = self.getOrPutTypeNoExtraAssumeCapacity( .{ .tag = .simple, .data = simple_field.value }, ); assert(result.new and result.type == @field(Type, simple_field.name)); - if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( - @field(llvm.Context, simple_field.name ++ "Type")(self.llvm_context), + if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity( + @field(llvm.Context, simple_field.name ++ "Type")(self.llvm.context), ); } - inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == - @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); + inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits| + assert(self.intTypeAssumeCapacity(bits) == + @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); inline for (.{0}) |addr_space| assert(self.ptrTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); } @@ -1463,13 +1519,11 @@ pub fn init(self: *Builder) Allocator.Error!void { assert(try self.intConst(.i1, 0) == .false); assert(try self.intConst(.i1, 1) == .true); assert(try self.noneConst(.token) == .none); + + return self; } pub fn deinit(self: *Builder) void { - self.llvm_types.deinit(self.gpa); - self.llvm_globals.deinit(self.gpa); - self.llvm_constants.deinit(self.gpa); - self.string_map.deinit(self.gpa); self.string_bytes.deinit(self.gpa); self.string_indices.deinit(self.gpa); @@ -1492,6 +1546,14 @@ pub fn deinit(self: *Builder) void { self.constant_extra.deinit(self.gpa); self.constant_limbs.deinit(self.gpa); + if (self.useLibLlvm()) { + self.llvm.constants.deinit(self.gpa); + self.llvm.globals.deinit(self.gpa); + self.llvm.types.deinit(self.gpa); + if (self.llvm.di_builder) |di_builder| di_builder.dispose(); + if (self.llvm.module) |module| module.dispose(); + self.llvm.context.dispose(); + } self.* = undefined; } @@ -1807,7 +1869,7 @@ pub fn namedTypeSetBody( const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len); defer self.gpa.free(llvm_fields); for (llvm_fields, body_fields) |*llvm_field, body_field| llvm_field.* = body_field.toLlvm(self); - self.llvm_types.items[@intFromEnum(named_type)].structSetBody( + self.llvm.types.items[@intFromEnum(named_type)].structSetBody( llvm_fields.ptr, @intCast(llvm_fields.len), switch (body_item.tag) { @@ -1869,7 +1931,7 @@ pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allo try self.constant_map.ensureUnusedCapacity(self.gpa, 1); try self.constant_items.ensureUnusedCapacity(self.gpa, 1); try self.constant_limbs.ensureUnusedCapacity(self.gpa, Constant.Integer.limbs + value.limbs.len); - if (self.useLibLlvm()) try self.llvm_constants.ensureUnusedCapacity(self.gpa, 1); + if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, 1); return self.bigIntConstAssumeCapacity(ty, value); } @@ -2166,7 +2228,7 @@ fn isValidIdentifier(id: []const u8) bool { } fn ensureUnusedCapacityGlobal(self: *Builder, name: String) Allocator.Error!void { - if (self.useLibLlvm()) try self.llvm_globals.ensureUnusedCapacity(self.gpa, 
1); + if (self.useLibLlvm()) try self.llvm.globals.ensureUnusedCapacity(self.gpa, 1); try self.string_map.ensureUnusedCapacity(self.gpa, 1); if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)})); @@ -2222,7 +2284,7 @@ fn fnTypeAssumeCapacity( const llvm_params = try self.gpa.alloc(*llvm.Type, params.len); defer self.gpa.free(llvm_params); for (llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(self); - self.llvm_types.appendAssumeCapacity(llvm.functionType( + self.llvm.types.appendAssumeCapacity(llvm.functionType( ret.toLlvm(self), llvm_params.ptr, @intCast(llvm_params.len), @@ -2240,7 +2302,7 @@ fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type { assert(bits > 0); const result = self.getOrPutTypeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits }); if (self.useLibLlvm() and result.new) - self.llvm_types.appendAssumeCapacity(self.llvm_context.intType(bits)); + self.llvm.types.appendAssumeCapacity(self.llvm.context.intType(bits)); return result.type; } @@ -2249,7 +2311,7 @@ fn ptrTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type { .{ .tag = .pointer, .data = @intFromEnum(addr_space) }, ); if (self.useLibLlvm() and result.new) - self.llvm_types.appendAssumeCapacity(self.llvm_context.pointerType(@intFromEnum(addr_space))); + self.llvm.types.appendAssumeCapacity(self.llvm.context.pointerType(@intFromEnum(addr_space))); return result.type; } @@ -2286,7 +2348,7 @@ fn vectorTypeAssumeCapacity( .tag = tag, .data = self.addTypeExtraAssumeCapacity(data), }); - if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity(switch (kind) { + if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(switch (kind) { .normal => &llvm.Type.vectorType, .scalable => &llvm.Type.scalableVectorType, }(child.toLlvm(self), @intCast(len))); @@ -2319,7 +2381,7 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type { .tag = .small_array, .data = self.addTypeExtraAssumeCapacity(data), }); - if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity( child.toLlvm(self).arrayType(@intCast(len)), ); } @@ -2352,7 +2414,7 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type { .tag = .array, .data = self.addTypeExtraAssumeCapacity(data), }); - if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( + if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity( child.toLlvm(self).arrayType(@intCast(len)), ); } @@ -2406,7 +2468,7 @@ fn structTypeAssumeCapacity( defer allocator.free(llvm_fields); for (llvm_fields, fields) |*llvm_field, field| llvm_field.* = field.toLlvm(self); - self.llvm_types.appendAssumeCapacity(self.llvm_context.structType( + self.llvm.types.appendAssumeCapacity(self.llvm.context.structType( llvm_fields.ptr, @intCast(llvm_fields.len), switch (kind) { @@ -2456,8 +2518,8 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { }); const result: Type = @enumFromInt(gop.index); type_gop.value_ptr.* = result; - if (self.useLibLlvm()) self.llvm_types.appendAssumeCapacity( - self.llvm_context.structCreateNamed(id.toSlice(self) orelse ""), + if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity( + self.llvm.context.structCreateNamed(id.toSlice(self) orelse ""), ); return result; } @@ -2481,7 +2543,7 @@ fn ensureUnusedTypeCapacity( self.gpa, count * (@typeInfo(E).Struct.fields.len + trail_len), ) else assert(trail_len == 0); - if 
(self.useLibLlvm()) try self.llvm_types.ensureUnusedCapacity(self.gpa, count); + if (self.useLibLlvm()) try self.llvm.types.ensureUnusedCapacity(self.gpa, count); } fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } { @@ -2613,9 +2675,9 @@ fn bigIntConstAssumeCapacity( if (self.useLibLlvm()) { const llvm_type = ty.toLlvm(self); if (canonical_value.to(c_longlong)) |small| { - self.llvm_constants.appendAssumeCapacity(llvm_type.constInt(@bitCast(small), .True)); + self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(@bitCast(small), .True)); } else |_| if (canonical_value.to(c_ulonglong)) |small| { - self.llvm_constants.appendAssumeCapacity(llvm_type.constInt(small, .False)); + self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(small, .False)); } else |_| { const llvm_limbs = try allocator.alloc(u64, std.math.divCeil( usize, @@ -2643,7 +2705,7 @@ fn bigIntConstAssumeCapacity( } result_limb.* = llvm_limb; } - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( llvm_type.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs.ptr), ); } @@ -2656,7 +2718,7 @@ fn halfConstAssumeCapacity(self: *Builder, val: f16) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .half, .data = @as(u16, @bitCast(val)) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity( if (std.math.isSignalNan(val)) Type.i16.toLlvm(self).constInt(@as(u16, @bitCast(val)), .False) .constBitCast(Type.half.toLlvm(self)) @@ -2671,7 +2733,7 @@ fn bfloatConstAssumeCapacity(self: *Builder, val: f32) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .bfloat, .data = @bitCast(val) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity( if (std.math.isSignalNan(val)) Type.i16.toLlvm(self).constInt(@as(u32, @bitCast(val)) >> 16, .False) .constBitCast(Type.bfloat.toLlvm(self)) @@ -2680,7 +2742,7 @@ fn bfloatConstAssumeCapacity(self: *Builder, val: f32) Constant { ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(Type.bfloat.toLlvm(self).constReal(val)); + self.llvm.constants.appendAssumeCapacity(Type.bfloat.toLlvm(self).constReal(val)); return result.constant; } @@ -2688,7 +2750,7 @@ fn floatConstAssumeCapacity(self: *Builder, val: f32) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .float, .data = @bitCast(val) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity( if (std.math.isSignalNan(val)) Type.i32.toLlvm(self).constInt(@as(u32, @bitCast(val)), .False) .constBitCast(Type.float.toLlvm(self)) @@ -2725,7 +2787,7 @@ fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant { .hi = @truncate(@as(u64, @bitCast(val))), }), }); - if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity( + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( if (std.math.isSignalNan(val)) Type.i64.toLlvm(self).constInt(@as(u64, @bitCast(val)), .False) .constBitCast(Type.double.toLlvm(self)) @@ -2771,7 +2833,7 @@ fn fp128ConstAssumeCapacity(self: *Builder, val: f128) Constant { @truncate(@as(u128, @bitCast(val))), @intCast(@as(u128, @bitCast(val)) >> 64), }; - 
self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( Type.i128.toLlvm(self) .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs) .constBitCast(Type.fp128.toLlvm(self)), @@ -2815,7 +2877,7 @@ fn x86_fp80ConstAssumeCapacity(self: *Builder, val: f80) Constant { @truncate(@as(u80, @bitCast(val))), @intCast(@as(u80, @bitCast(val)) >> 64), }; - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( Type.i80.toLlvm(self) .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs) .constBitCast(Type.x86_fp80.toLlvm(self)), @@ -2857,7 +2919,7 @@ fn ppc_fp128ConstAssumeCapacity(self: *Builder, val: [2]f64) Constant { }); if (self.useLibLlvm()) { const llvm_limbs: *const [2]u64 = @ptrCast(&val); - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( Type.i128.toLlvm(self) .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs) .constBitCast(Type.ppc_fp128.toLlvm(self)), @@ -2873,7 +2935,7 @@ fn nullConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .null, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); return result.constant; } @@ -2883,7 +2945,7 @@ fn noneConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .none, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); return result.constant; } @@ -2929,7 +2991,7 @@ fn structConstAssumeCapacity( defer allocator.free(llvm_vals); for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self); - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( ty.toLlvm(self).constNamedStruct(llvm_vals.ptr, @intCast(llvm_vals.len)), ); } @@ -2971,7 +3033,7 @@ fn arrayConstAssumeCapacity( defer allocator.free(llvm_vals); for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self); - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( type_extra.child.toLlvm(self).constArray(llvm_vals.ptr, @intCast(llvm_vals.len)), ); } @@ -2985,8 +3047,8 @@ fn stringConstAssumeCapacity(self: *Builder, val: String) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .string, .data = @intFromEnum(val) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( - self.llvm_context.constString(slice.ptr, @intCast(slice.len), .True), + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity( + self.llvm.context.constString(slice.ptr, @intCast(slice.len), .True), ); return result.constant; } @@ -2998,8 +3060,8 @@ fn stringNullConstAssumeCapacity(self: *Builder, val: String) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .string_null, .data = @intFromEnum(val) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity( - self.llvm_context.constString(slice.ptr, @intCast(slice.len + 1), .True), + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity( + self.llvm.context.constString(slice.ptr, @intCast(slice.len + 1), .True), ); return result.constant; } @@ -3032,7 +3094,7 @@ fn vectorConstAssumeCapacity( defer allocator.free(llvm_vals); for (llvm_vals, vals) 
|*llvm_val, val| llvm_val.* = val.toLlvm(self); - self.llvm_constants.appendAssumeCapacity( + self.llvm.constants.appendAssumeCapacity( llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)), ); } @@ -3061,7 +3123,7 @@ fn zeroInitConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .zeroinitializer, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull()); return result.constant; } @@ -3078,7 +3140,7 @@ fn undefConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .undef, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); return result.constant; } @@ -3095,7 +3157,7 @@ fn poisonConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .poison, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm_constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); return result.constant; } @@ -3128,7 +3190,7 @@ fn blockAddrConstAssumeCapacity( .tag = .blockaddress, .data = self.addConstantExtraAssumeCapacity(data), }); - if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity( + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( function.toLlvm(self).blockAddress(block.toValue(self, function).toLlvm(self, function)), ); } @@ -3139,7 +3201,7 @@ fn dsoLocalEquivalentConstAssumeCapacity(self: *Builder, function: Function.Inde const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .dso_local_equivalent, .data = @intFromEnum(function) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity(undefined); + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined); return result.constant; } @@ -3147,7 +3209,7 @@ fn noCfiConstAssumeCapacity(self: *Builder, function: Function.Index) Constant { const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .no_cfi, .data = @intFromEnum(function) }, ); - if (self.useLibLlvm() and result.new) self.llvm_constants.appendAssumeCapacity(undefined); + if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined); return result.constant; } @@ -3226,7 +3288,7 @@ fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, arg: Constant, ty: .tag = tag, .data = self.addConstantExtraAssumeCapacity(data.cast), }); - if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity(switch (tag) { + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) { .trunc => &llvm.Value.constTrunc, .zext => &llvm.Value.constZExt, .sext => &llvm.Value.constSExt, @@ -3330,7 +3392,7 @@ fn gepConstAssumeCapacity( defer allocator.free(llvm_indices); for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self); - self.llvm_constants.appendAssumeCapacity(switch (kind) { + self.llvm.constants.appendAssumeCapacity(switch (kind) { .normal => &llvm.Type.constGEP, .inbounds => &llvm.Type.constInBoundsGEP, }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(indices.len))); @@ -3374,7 +3436,7 @@ fn binConstAssumeCapacity( .tag = tag, .data = self.addConstantExtraAssumeCapacity(data.bin), }); - if (self.useLibLlvm()) self.llvm_constants.appendAssumeCapacity(switch 
(tag) { + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) { .add => &llvm.Value.constAdd, .sub => &llvm.Value.constSub, .mul => &llvm.Value.constMul, @@ -3402,7 +3464,7 @@ fn ensureUnusedConstantCapacity( self.gpa, count * (@typeInfo(E).Struct.fields.len + trail_len), ) else assert(trail_len == 0); - if (self.useLibLlvm()) try self.llvm_constants.ensureUnusedCapacity(self.gpa, count); + if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, count); } fn getOrPutConstantNoExtraAssumeCapacity( @@ -3516,13 +3578,15 @@ fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Ite return self.constantExtraDataTrail(T, index).data; } -inline fn useLibLlvm(self: *const Builder) bool { +pub inline fn useLibLlvm(self: *const Builder) bool { return build_options.have_llvm and self.use_lib_llvm; } const assert = std.debug.assert; const build_options = @import("build_options"); +const builtin = @import("builtin"); const llvm = @import("bindings.zig"); +const log = std.log.scoped(.llvm); const std = @import("std"); const Allocator = std.mem.Allocator; -- cgit v1.2.3 From 1f8407c356b5cdc9eca0f2403a85d863744cf279 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 11 Jul 2023 02:14:17 -0400 Subject: llvm: cleanup management and implement more const functions --- src/codegen/llvm.zig | 64 ++--- src/codegen/llvm/Builder.zig | 579 +++++++++++++++++++++++++++++++++++++----- src/codegen/llvm/bindings.zig | 126 ++++++--- 3 files changed, 629 insertions(+), 140 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c34e241fa4..704b93cfa4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -533,15 +533,6 @@ const DataLayoutBuilder = struct { } }; -/// TODO can this be done with simpler logic / different API binding? -fn deleteLlvmGlobal(llvm_global: *llvm.Value) void { - if (llvm_global.globalGetValueType().getTypeKind() == .Function) { - llvm_global.deleteFunction(); - return; - } - return llvm_global.deleteGlobal(); -} - pub const Object = struct { gpa: Allocator, builder: Builder, @@ -818,7 +809,7 @@ pub const Object = struct { .init = str_init, }; try o.builder.llvm.globals.append(o.gpa, str_global); - const str_global_index = try o.builder.addGlobal(.none, global); + const str_global_index = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{ @@ -848,7 +839,7 @@ pub const Object = struct { .init = error_name_table_init, }; try o.builder.llvm.globals.append(o.gpa, error_name_table_global); - _ = try o.builder.addGlobal(.none, global); + _ = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); const error_name_table_ptr = error_name_table_global; @@ -904,35 +895,27 @@ pub const Object = struct { // This map has externs with incorrect symbol names. for (object.extern_collisions.keys()) |decl_index| { const global = object.decl_map.get(decl_index) orelse continue; - const llvm_global = global.toLlvm(&object.builder); // Same logic as below but for externs instead of exports. 
const decl_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue; const other_global = object.builder.getGlobal(decl_name) orelse continue; - const other_llvm_global = other_global.toLlvm(&object.builder); - if (other_llvm_global == llvm_global) continue; + if (other_global.eql(global, &object.builder)) continue; - llvm_global.replaceAllUsesWith(other_llvm_global); - deleteLlvmGlobal(llvm_global); - object.builder.llvm.globals.items[@intFromEnum(global)] = other_llvm_global; + try global.replace(other_global, &object.builder); } object.extern_collisions.clearRetainingCapacity(); for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { const global = object.decl_map.get(decl_index) orelse continue; - const llvm_global = global.toLlvm(&object.builder); for (export_list.items) |exp| { // Detect if the LLVM global has already been created as an extern. In such // case, we need to replace all uses of it with this exported global. const exp_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue; const other_global = object.builder.getGlobal(exp_name) orelse continue; - const other_llvm_global = other_global.toLlvm(&object.builder); - if (other_llvm_global == llvm_global) continue; + if (other_global.eql(global, &object.builder)) continue; - other_llvm_global.replaceAllUsesWith(llvm_global); - try global.takeName(&object.builder, other_global); - deleteLlvmGlobal(other_llvm_global); - object.builder.llvm.globals.items[@intFromEnum(other_global)] = llvm_global; + try global.takeName(other_global, &object.builder); + try other_global.replace(global, &object.builder); // Problem: now we need to replace in the decl_map that // the extern decl index points to this new global. However we don't // know the decl index. 
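Both collision loops above now work on Builder global indices rather than raw llvm.Value pointers. A minimal sketch of the export case (the second loop), using only the takeName/replace/eql helpers this series introduces; the wrapper function name is illustrative and not part of the patch:

    // When an export's name collides with a global that was first created as an
    // extern placeholder, the exported global takes over the symbol name and the
    // placeholder is forwarded to it, so existing references keep resolving.
    fn resolveExportCollision(
        builder: *Builder,
        exported: Builder.Global.Index,
        placeholder: Builder.Global.Index,
    ) !void {
        // Nothing to do if both indices already refer to the same underlying global.
        if (placeholder.eql(exported, builder)) return;
        // The exported global owns the symbol name from now on...
        try exported.takeName(placeholder, builder);
        // ...and the placeholder is forwarded so all of its uses resolve to the export.
        try placeholder.replace(exported, builder);
    }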
@@ -1519,7 +1502,7 @@ pub const Object = struct { } } - try global.rename(&self.builder, decl_name); + try global.rename(decl_name, &self.builder); global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); global.ptr(&self.builder).linkage = .external; @@ -1558,7 +1541,7 @@ pub const Object = struct { global.ptr(&self.builder).updateAttributes(); } else if (exports.len != 0) { const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name)); - try global.rename(&self.builder, exp_name); + try global.rename(exp_name, &self.builder); global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); if (mod.wantDllExports()) { @@ -1641,7 +1624,7 @@ pub const Object = struct { } } else { const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod))); - try global.rename(&self.builder, fqn); + try global.rename(fqn, &self.builder); global.ptr(&self.builder).linkage = .internal; llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) { @@ -2738,7 +2721,7 @@ pub const Object = struct { .init = llvm_init, }; try o.builder.llvm.globals.append(o.gpa, llvm_global); - _ = try o.builder.addGlobal(.none, global); + _ = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace) @@ -4473,8 +4456,8 @@ pub const DeclGen = struct { _ = try o.resolveLlvmFunction(extern_func.decl); } else { const target = mod.getTarget(); - const object = try o.resolveGlobalDecl(decl_index); - const global = object.ptrConst(&o.builder).global; + const variable = try o.resolveGlobalDecl(decl_index); + const global = variable.ptrConst(&o.builder).global; var llvm_global = global.toLlvm(&o.builder); global.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); llvm_global.setAlignment(decl.getAlignment(mod)); @@ -4483,18 +4466,18 @@ pub const DeclGen = struct { llvm_global.setSection(section); } assert(decl.has_tv); - const init_val = if (decl.val.getVariable(mod)) |decl_var| init_val: { - object.ptr(&o.builder).mutability = .global; - break :init_val decl_var.init; - } else init_val: { - object.ptr(&o.builder).mutability = .constant; + const init_val = if (decl.val.getVariable(mod)) |decl_var| decl_var.init else init_val: { + variable.ptr(&o.builder).mutability = .constant; llvm_global.setGlobalConstant(.True); break :init_val decl.val.toIntern(); }; if (init_val != .none) { const llvm_init = try o.lowerValue(init_val); + const llvm_init_ty = llvm_init.typeOf(&o.builder); + global.ptr(&o.builder).type = llvm_init_ty; + variable.ptr(&o.builder).mutability = .global; + variable.ptr(&o.builder).init = llvm_init; if (llvm_global.globalGetValueType() == llvm_init.typeOf(&o.builder).toLlvm(&o.builder)) { - object.ptr(&o.builder).init = llvm_init; llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); } else { // LLVM does not allow us to change the type of globals. 
So we must @@ -4512,7 +4495,7 @@ pub const DeclGen = struct { // Related: https://github.com/ziglang/zig/issues/13265 const llvm_global_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const new_global = o.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(&o.builder).toLlvm(&o.builder), + llvm_init_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_global_addrspace), ); @@ -4525,7 +4508,7 @@ pub const DeclGen = struct { // TODO: How should this work then the address space of a global changed? llvm_global.replaceAllUsesWith(new_global); new_global.takeName(llvm_global); - o.builder.llvm.globals.items[@intFromEnum(object.ptrConst(&o.builder).global)] = + o.builder.llvm.globals.items[@intFromEnum(variable.ptrConst(&o.builder).global)] = new_global; llvm_global.deleteGlobal(); llvm_global = new_global; @@ -4672,7 +4655,7 @@ pub const FuncGen = struct { .init = llvm_val, }; try o.builder.llvm.globals.append(o.gpa, llvm_global); - _ = try o.builder.addGlobal(.none, global); + _ = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) @@ -9312,6 +9295,7 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), + .body = {}, }; const prev_block = self.builder.getInsertBlock(); @@ -9395,6 +9379,7 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), + .body = {}, }; const prev_block = self.builder.getInsertBlock(); @@ -9485,6 +9470,7 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), + .body = {}, }; try o.builder.llvm.globals.append(self.gpa, llvm_fn); diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 22df52753a..2ed8633f59 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -29,6 +29,7 @@ type_extra: std.ArrayListUnmanaged(u32) = .{}, globals: std.AutoArrayHashMapUnmanaged(String, Global) = .{}, next_unnamed_global: String = @enumFromInt(0), +next_replaced_global: String = .none, next_unique_global_id: std.AutoHashMapUnmanaged(String, u32) = .{}, aliases: std.ArrayListUnmanaged(Alias) = .{}, variables: std.ArrayListUnmanaged(Variable) = .{}, @@ -55,6 +56,11 @@ pub const String = enum(u32) { empty, _, + pub fn isAnon(self: String) bool { + assert(self != .none); + return self.toIndex() == null; + } + pub fn toSlice(self: String, b: *const Builder) ?[:0]const u8 { const index = self.toIndex() orelse return null; const start = b.string_indices.items[index]; @@ -743,18 +749,36 @@ pub const Global = struct { alias: Alias.Index, variable: Variable.Index, function: Function.Index, + replaced: Global.Index, }, pub const Index = enum(u32) { none = std.math.maxInt(u32), _, + pub fn unwrap(self: Index, builder: *const Builder) Index { + var cur = self; + while (true) { + const replacement = cur.getReplacement(builder); + if (replacement == .none) return cur; + cur = replacement; + } + } + + pub fn eql(self: Index, other: Index, builder: *const Builder) bool { + return self.unwrap(builder) == other.unwrap(builder); + } + + pub fn name(self: Index, builder: *const Builder) String { + return builder.globals.keys()[@intFromEnum(self.unwrap(builder))]; + } + pub fn ptr(self: Index, builder: *Builder) *Global { - return &builder.globals.values()[@intFromEnum(self)]; + return &builder.globals.values()[@intFromEnum(self.unwrap(builder))]; 
} pub fn ptrConst(self: Index, builder: *const Builder) *const Global { - return &builder.globals.values()[@intFromEnum(self)]; + return &builder.globals.values()[@intFromEnum(self.unwrap(builder))]; } pub fn toConst(self: Index) Constant { @@ -763,7 +787,7 @@ pub const Global = struct { pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); - return builder.llvm.globals.items[@intFromEnum(self)]; + return builder.llvm.globals.items[@intFromEnum(self.unwrap(builder))]; } const FormatData = struct { @@ -777,44 +801,80 @@ pub const Global = struct { writer: anytype, ) @TypeOf(writer).Error!void { try writer.print("@{}", .{ - data.builder.globals.keys()[@intFromEnum(data.global)].fmt(data.builder), + data.global.unwrap(data.builder).name(data.builder).fmt(data.builder), }); } pub fn fmt(self: Index, builder: *const Builder) std.fmt.Formatter(format) { return .{ .data = .{ .global = self, .builder = builder } }; } - pub fn rename(self: Index, builder: *Builder, name: String) Allocator.Error!void { - try builder.ensureUnusedCapacityGlobal(name); - self.renameAssumeCapacity(builder, name); + pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void { + try builder.ensureUnusedCapacityGlobal(new_name); + self.renameAssumeCapacity(new_name, builder); } - pub fn renameAssumeCapacity(self: Index, builder: *Builder, name: String) void { - const index = @intFromEnum(self); - if (builder.globals.keys()[index] == name) return; - if (builder.useLibLlvm()) builder.llvm.globals.appendAssumeCapacity(builder.llvm.globals.items[index]); - _ = builder.addGlobalAssumeCapacity(name, builder.globals.values()[index]); - if (builder.useLibLlvm()) _ = builder.llvm.globals.pop(); - builder.globals.swapRemoveAt(index); - self.updateName(builder); + pub fn takeName(self: Index, other: Index, builder: *Builder) Allocator.Error!void { + try builder.ensureUnusedCapacityGlobal(.empty); + self.takeNameAssumeCapacity(other, builder); } - pub fn takeName(self: Index, builder: *Builder, other: Index) Allocator.Error!void { + pub fn replace(self: Index, other: Index, builder: *Builder) Allocator.Error!void { try builder.ensureUnusedCapacityGlobal(.empty); - self.takeNameAssumeCapacity(builder, other); + self.replaceAssumeCapacity(other, builder); + } + + fn renameAssumeCapacity(self: Index, new_name: String, builder: *Builder) void { + const old_name = self.name(builder); + if (new_name == old_name) return; + const index = @intFromEnum(self.unwrap(builder)); + if (builder.useLibLlvm()) + builder.llvm.globals.appendAssumeCapacity(builder.llvm.globals.items[index]); + _ = builder.addGlobalAssumeCapacity(new_name, builder.globals.values()[index]); + if (builder.useLibLlvm()) _ = builder.llvm.globals.pop(); + builder.globals.swapRemoveAt(index); + self.updateName(builder); + if (!old_name.isAnon()) return; + builder.next_unnamed_global = @enumFromInt(@intFromEnum(builder.next_unnamed_global) - 1); + if (builder.next_unnamed_global == old_name) return; + builder.getGlobal(builder.next_unnamed_global).?.renameAssumeCapacity(old_name, builder); } - pub fn takeNameAssumeCapacity(self: Index, builder: *Builder, other: Index) void { - const other_name = builder.globals.keys()[@intFromEnum(other)]; - other.renameAssumeCapacity(builder, .none); - self.renameAssumeCapacity(builder, other_name); + fn takeNameAssumeCapacity(self: Index, other: Index, builder: *Builder) void { + const other_name = other.name(builder); + other.renameAssumeCapacity(.empty, builder); + 
self.renameAssumeCapacity(other_name, builder); } fn updateName(self: Index, builder: *const Builder) void { if (!builder.useLibLlvm()) return; - const index = @intFromEnum(self); - const slice = builder.globals.keys()[index].toSlice(builder) orelse ""; - builder.llvm.globals.items[index].setValueName2(slice.ptr, slice.len); + const index = @intFromEnum(self.unwrap(builder)); + const name_slice = self.name(builder).toSlice(builder) orelse ""; + builder.llvm.globals.items[index].setValueName2(name_slice.ptr, name_slice.len); + } + + fn replaceAssumeCapacity(self: Index, other: Index, builder: *Builder) void { + if (self.eql(other, builder)) return; + builder.next_replaced_global = @enumFromInt(@intFromEnum(builder.next_replaced_global) - 1); + self.renameAssumeCapacity(builder.next_replaced_global, builder); + if (builder.useLibLlvm()) { + const self_llvm = self.toLlvm(builder); + self_llvm.replaceAllUsesWith(other.toLlvm(builder)); + switch (self.ptr(builder).kind) { + .alias, + .variable, + => self_llvm.deleteGlobal(), + .function => self_llvm.deleteFunction(), + .replaced => unreachable, + } + } + self.ptr(builder).kind = .{ .replaced = other.unwrap(builder) }; + } + + fn getReplacement(self: Index, builder: *const Builder) Index { + return switch (builder.globals.values()[@intFromEnum(self)].kind) { + .replaced => |replacement| replacement, + else => .none, + }; } }; @@ -1014,8 +1074,14 @@ pub const Constant = enum(u32) { insertelement, shufflevector, add, + @"add nsw", + @"add nuw", sub, + @"sub nsw", + @"sub nuw", mul, + @"mul nsw", + @"mul nuw", shl, lshr, ashr, @@ -1084,24 +1150,24 @@ pub const Constant = enum(u32) { pub const Kind = enum { normal, inbounds }; }; - pub const Compare = struct { + pub const Compare = extern struct { cond: u32, lhs: Constant, rhs: Constant, }; - pub const ExtractElement = struct { + pub const ExtractElement = extern struct { arg: Constant, index: Constant, }; - pub const InsertElement = struct { + pub const InsertElement = extern struct { arg: Constant, elem: Constant, index: Constant, }; - pub const ShuffleVector = struct { + pub const ShuffleVector = extern struct { lhs: Constant, rhs: Constant, mask: Constant, @@ -1243,8 +1309,14 @@ pub const Constant = enum(u32) { }; }, .add, + .@"add nsw", + .@"add nuw", .sub, + .@"sub nsw", + .@"sub nuw", .mul, + .@"mul nsw", + .@"mul nuw", .shl, .lshr, .ashr, @@ -1326,14 +1398,14 @@ pub const Constant = enum(u32) { switch (item.tag) { .positive_integer, .negative_integer, - => { + => |tag| { const extra: *align(@alignOf(std.math.big.Limb)) Integer = @ptrCast(data.builder.constant_limbs.items[item.data..][0..Integer.limbs]); const limbs = data.builder.constant_limbs .items[item.data + Integer.limbs ..][0..extra.limbs_len]; const bigint = std.math.big.int.Const{ .limbs = limbs, - .positive = item.tag == .positive_integer, + .positive = tag == .positive_integer, }; const ExpectedContents = extern struct { string: [(64 * 8 / std.math.log2(10)) + 2]u8, @@ -1352,23 +1424,63 @@ pub const Constant = enum(u32) { defer allocator.free(str); try writer.writeAll(str); }, + .half, + .bfloat, + => |tag| try writer.print("0x{c}{X:0>4}", .{ @as(u8, switch (tag) { + .half => 'H', + .bfloat => 'R', + else => unreachable, + }), item.data >> switch (tag) { + .half => 0, + .bfloat => 16, + else => unreachable, + } }), + .float => try writer.print("0x{X:0>16}", .{ + @as(u64, @bitCast(@as(f64, @as(f32, @bitCast(item.data))))), + }), + .double => { + const extra = data.builder.constantExtraData(Double, item.data); + try 
writer.print("0x{X:0>8}{X:0>8}", .{ extra.hi, extra.lo }); + }, + .fp128, + .ppc_fp128, + => |tag| { + const extra = data.builder.constantExtraData(Fp128, item.data); + try writer.print("0x{c}{X:0>8}{X:0>8}{X:0>8}{X:0>8}", .{ + @as(u8, switch (tag) { + .fp128 => 'L', + .ppc_fp128 => 'M', + else => unreachable, + }), + extra.lo_hi, + extra.lo_lo, + extra.hi_hi, + extra.hi_lo, + }); + }, + .x86_fp80 => { + const extra = data.builder.constantExtraData(Fp80, item.data); + try writer.print("0xK{X:0>4}{X:0>8}{X:0>8}", .{ + extra.hi, extra.lo_hi, extra.lo_lo, + }); + }, .null, .none, .zeroinitializer, .undef, .poison, - => try writer.writeAll(@tagName(item.tag)), + => |tag| try writer.writeAll(@tagName(tag)), .structure, .packed_structure, .array, .vector, - => { + => |tag| { const extra = data.builder.constantExtraDataTrail(Aggregate, item.data); const len = extra.data.type.aggregateLen(data.builder); const vals: []const Constant = @ptrCast(data.builder.constant_extra.items[extra.end..][0..len]); - try writer.writeAll(switch (item.tag) { + try writer.writeAll(switch (tag) { .structure => "{ ", .packed_structure => "<{ ", .array => "[", @@ -1379,7 +1491,7 @@ pub const Constant = enum(u32) { if (index > 0) try writer.writeAll(", "); try writer.print("{%}", .{val.fmt(data.builder)}); } - try writer.writeAll(switch (item.tag) { + try writer.writeAll(switch (tag) { .structure => " }", .packed_structure => " }>", .array => "]", @@ -1387,33 +1499,130 @@ pub const Constant = enum(u32) { else => unreachable, }); }, - .string => try writer.print( - \\c{"} - , .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}), - .string_null => try writer.print( - \\c{"@} - , .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}), - .blockaddress => { + inline .string, + .string_null, + => |tag| try writer.print("c{\"" ++ switch (tag) { + .string => "", + .string_null => "@", + else => unreachable, + } ++ "}", .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}), + .blockaddress => |tag| { const extra = data.builder.constantExtraData(BlockAddress, item.data); const function = extra.function.ptrConst(data.builder); try writer.print("{s}({}, %{d})", .{ - @tagName(item.tag), + @tagName(tag), function.global.fmt(data.builder), @intFromEnum(extra.block), // TODO }); }, .dso_local_equivalent, .no_cfi, - => { + => |tag| { const extra = data.builder.constantExtraData(FunctionReference, item.data); try writer.print("{s} {}", .{ - @tagName(item.tag), + @tagName(tag), extra.function.ptrConst(data.builder).global.fmt(data.builder), }); }, - else => try writer.print("<{s}:0x{X}>", .{ - @tagName(item.tag), @intFromEnum(data.constant), - }), + .trunc, + .zext, + .sext, + .fptrunc, + .fpext, + .fptoui, + .fptosi, + .uitofp, + .sitofp, + .ptrtoint, + .inttoptr, + .bitcast, + .addrspacecast, + => |tag| { + const extra = data.builder.constantExtraData(Cast, item.data); + try writer.print("{s} ({%} to {%})", .{ + @tagName(tag), + extra.arg.fmt(data.builder), + extra.type.fmt(data.builder), + }); + }, + .getelementptr, + .@"getelementptr inbounds", + => |tag| { + const extra = data.builder.constantExtraDataTrail(GetElementPtr, item.data); + const indices: []const Constant = @ptrCast(data.builder.constant_extra + .items[extra.end..][0..extra.data.indices_len]); + try writer.print("{s} ({%}, {%}", .{ + @tagName(tag), + extra.data.type.fmt(data.builder), + extra.data.base.fmt(data.builder), + }); + for (indices) |index| try writer.print(", {%}", .{index.fmt(data.builder)}); + try writer.writeByte(')'); + }, + inline .icmp, + 
.fcmp, + => |tag| { + const extra = data.builder.constantExtraData(Compare, item.data); + try writer.print("{s} {s} ({%}, {%})", .{ + @tagName(tag), + @tagName(@as(switch (tag) { + .icmp => IntegerCondition, + .fcmp => FloatCondition, + else => unreachable, + }, @enumFromInt(extra.cond))), + extra.lhs.fmt(data.builder), + extra.rhs.fmt(data.builder), + }); + }, + .extractelement => |tag| { + const extra = data.builder.constantExtraData(ExtractElement, item.data); + try writer.print("{s} ({%}, {%})", .{ + @tagName(tag), + extra.arg.fmt(data.builder), + extra.index.fmt(data.builder), + }); + }, + .insertelement => |tag| { + const extra = data.builder.constantExtraData(InsertElement, item.data); + try writer.print("{s} ({%}, {%}, {%})", .{ + @tagName(tag), + extra.arg.fmt(data.builder), + extra.elem.fmt(data.builder), + extra.index.fmt(data.builder), + }); + }, + .shufflevector => |tag| { + const extra = data.builder.constantExtraData(ShuffleVector, item.data); + try writer.print("{s} ({%}, {%}, {%})", .{ + @tagName(tag), + extra.lhs.fmt(data.builder), + extra.rhs.fmt(data.builder), + extra.mask.fmt(data.builder), + }); + }, + .add, + .@"add nsw", + .@"add nuw", + .sub, + .@"sub nsw", + .@"sub nuw", + .mul, + .@"mul nsw", + .@"mul nuw", + .shl, + .lshr, + .ashr, + .@"and", + .@"or", + .xor, + => |tag| { + const extra = data.builder.constantExtraData(Binary, item.data); + try writer.print("{s} ({%}, {%})", .{ + @tagName(tag), + extra.lhs.fmt(data.builder), + extra.rhs.fmt(data.builder), + }); + }, } }, .global => |global| try writer.print("{}", .{global.fmt(data.builder)}), @@ -1882,6 +2091,7 @@ pub fn namedTypeSetBody( } pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { + assert(!name.isAnon()); try self.ensureUnusedTypeCapacity(1, null, 0); try self.ensureUnusedCapacityGlobal(name); return self.addGlobalAssumeCapacity(name, global); @@ -1890,9 +2100,10 @@ pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!G pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Global.Index { _ = self.ptrTypeAssumeCapacity(global.addr_space); var id = name; - if (id == .none) { + if (name == .empty) { id = self.next_unnamed_global; - self.next_unnamed_global = @enumFromInt(@intFromEnum(self.next_unnamed_global) + 1); + assert(id != self.next_replaced_global); + self.next_unnamed_global = @enumFromInt(@intFromEnum(id) + 1); } while (true) { const global_gop = self.globals.getOrPutAssumeCapacity(id); @@ -2136,7 +2347,7 @@ pub fn binConst( return self.binConstAssumeCapacity(tag, lhs, rhs); } -pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { +pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void { if (self.source_filename != .none) try writer.print( \\; ModuleID = '{s}' \\source_filename = {"} @@ -2157,7 +2368,8 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { , .{ id.fmt(self), ty.fmt(self) }); try writer.writeByte('\n'); for (self.variables.items) |variable| { - const global = self.globals.values()[@intFromEnum(variable.global)]; + if (variable.global.getReplacement(self) != .none) continue; + const global = variable.global.ptrConst(self); try writer.print( \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{,} \\ @@ -2179,7 +2391,8 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { } try writer.writeByte('\n'); for (self.functions.items) |function| { - const global = 
self.globals.values()[@intFromEnum(function.global)]; + if (function.global.getReplacement(self) != .none) continue; + const global = function.global.ptrConst(self); const item = self.type_items.items[@intFromEnum(global.type)]; const extra = self.typeExtraDataTrail(Type.Function, item.data); const params: []const Type = @@ -2207,14 +2420,51 @@ pub fn dump(self: *Builder, writer: anytype) @TypeOf(writer).Error!void { }, else => unreachable, } - try writer.print(") {}{}", .{ global.unnamed_addr, global.alignment }); - if (function.body) |_| try writer.print( - \\{{ - \\ ret {%} - \\}} - \\ - , .{extra.data.ret.fmt(self)}); - try writer.writeByte('\n'); + try writer.print("){}{}", .{ global.unnamed_addr, global.alignment }); + if (function.body) |_| { + try writer.writeAll(" {\n ret "); + void: { + try writer.print("{%}", .{switch (extra.data.ret) { + .void => |tag| { + try writer.writeAll(@tagName(tag)); + break :void; + }, + inline .half, + .bfloat, + .float, + .double, + .fp128, + .x86_fp80, + => |tag| try @field(Builder, @tagName(tag) ++ "Const")(self, 0.0), + .ppc_fp128 => try self.ppc_fp128Const(.{ 0.0, 0.0 }), + .x86_amx, + .x86_mmx, + .label, + .metadata, + => unreachable, + .token => Constant.none, + else => switch (extra.data.ret.tag(self)) { + .simple, + .function, + .vararg_function, + => unreachable, + .integer => try self.intConst(extra.data.ret, 0), + .pointer => try self.nullConst(extra.data.ret), + .target, + .vector, + .scalable_vector, + .small_array, + .array, + .structure, + .packed_structure, + .named_structure, + => try self.zeroInitConst(extra.data.ret), + }, + }.fmt(self)}); + } + try writer.writeAll("\n}"); + } + try writer.writeAll("\n\n"); } } @@ -2497,11 +2747,11 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { } }; var id = name; - if (name == .none) { + if (name == .empty) { id = self.next_unnamed_type; assert(id != .none); self.next_unnamed_type = @enumFromInt(@intFromEnum(id) + 1); - } else assert(name.toIndex() != null); + } else assert(!name.isAnon()); while (true) { const type_gop = self.types.getOrPutAssumeCapacity(id); if (!type_gop.found_existing) { @@ -2783,8 +3033,8 @@ fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant { self.constant_items.appendAssumeCapacity(.{ .tag = .double, .data = self.addConstantExtraAssumeCapacity(Constant.Double{ - .lo = @intCast(@as(u64, @bitCast(val)) >> 32), - .hi = @truncate(@as(u64, @bitCast(val))), + .lo = @truncate(@as(u64, @bitCast(val))), + .hi = @intCast(@as(u64, @bitCast(val)) >> 32), }), }); if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( @@ -3401,6 +3651,190 @@ fn gepConstAssumeCapacity( return @enumFromInt(gop.index); } +fn icmpConstAssumeCapacity( + self: *Builder, + cond: IntegerCondition, + lhs: Constant, + rhs: Constant, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.Compare) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(Constant.tag.icmp)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.Compare, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .icmp) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Compare, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.Compare{ .cond = @intFromEnum(cond), .lhs = lhs, .rhs = rhs }; + const gop = 
self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .icmp, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + llvm.constICmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + +fn fcmpConstAssumeCapacity( + self: *Builder, + cond: FloatCondition, + lhs: Constant, + rhs: Constant, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.Compare) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(Constant.tag.fcmp)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.Compare, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .fcmp) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Compare, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.Compare{ .cond = @intFromEnum(cond), .lhs = lhs, .rhs = rhs }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .fcmp, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + llvm.constFCmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + +fn extractElementConstAssumeCapacity( + self: *Builder, + arg: Constant, + index: Constant, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.ExtractElement) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.extractelement)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.ExtractElement, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .extractelement) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.ExtractElement, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.ExtractElement{ .arg = arg, .index = index }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .extractelement, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + arg.toLlvm(self).constExtractElement(index.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + +fn insertElementConstAssumeCapacity( + self: *Builder, + arg: Constant, + elem: Constant, + index: Constant, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.InsertElement) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.insertelement)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.InsertElement, _: 
void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .insertelement) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.InsertElement, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.InsertElement{ .arg = arg, .elem = elem, .index = index }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .insertelement, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + arg.toLlvm(self).constInsertElement(elem.toLlvm(self), index.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + +fn shuffleVectorConstAssumeCapacity( + self: *Builder, + lhs: Constant, + rhs: Constant, + mask: Constant, +) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.ShuffleVector) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.shufflevector)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.ShuffleVector, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .shufflevector) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.ShuffleVector, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.ShuffleVector{ .lhs = lhs, .rhs = rhs, .mask = mask }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .shufflevector, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + lhs.toLlvm(self).constShuffleVector(rhs.toLlvm(self), mask.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + fn binConstAssumeCapacity( self: *Builder, tag: Constant.Tag, @@ -3408,7 +3842,22 @@ fn binConstAssumeCapacity( rhs: Constant, ) Constant { switch (tag) { - .add, .sub, .mul, .shl, .lshr, .ashr, .@"and", .@"or", .xor => {}, + .add, + .@"add nsw", + .@"add nuw", + .sub, + .@"sub nsw", + .@"sub nuw", + .mul, + .@"mul nsw", + .@"mul nuw", + .shl, + .lshr, + .ashr, + .@"and", + .@"or", + .xor, + => {}, else => unreachable, } const Key = struct { tag: Constant.Tag, bin: Constant.Binary }; diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 080184f488..c3c471fc2e 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -168,6 +168,66 @@ pub const Value = opaque { pub const setAliasee = LLVMAliasSetAliasee; extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void; + pub const constZExtOrBitCast = LLVMConstZExtOrBitCast; + extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value; + + pub const constNeg = LLVMConstNeg; + extern fn LLVMConstNeg(ConstantVal: *Value) *Value; + + pub const constNSWNeg = LLVMConstNSWNeg; + extern fn LLVMConstNSWNeg(ConstantVal: *Value) *Value; + + pub const constNUWNeg = LLVMConstNUWNeg; + extern fn LLVMConstNUWNeg(ConstantVal: *Value) *Value; + + pub const constNot = 
LLVMConstNot; + extern fn LLVMConstNot(ConstantVal: *Value) *Value; + + pub const constAdd = LLVMConstAdd; + extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNSWAdd = LLVMConstNSWAdd; + extern fn LLVMConstNSWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNUWAdd = LLVMConstNUWAdd; + extern fn LLVMConstNUWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constSub = LLVMConstSub; + extern fn LLVMConstSub(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNSWSub = LLVMConstNSWSub; + extern fn LLVMConstNSWSub(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNUWSub = LLVMConstNUWSub; + extern fn LLVMConstNUWSub(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constMul = LLVMConstMul; + extern fn LLVMConstMul(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNSWMul = LLVMConstNSWMul; + extern fn LLVMConstNSWMul(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constNUWMul = LLVMConstNUWMul; + extern fn LLVMConstNUWMul(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constAnd = LLVMConstAnd; + extern fn LLVMConstAnd(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constOr = LLVMConstOr; + extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constXor = LLVMConstXor; + extern fn LLVMConstXor(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constShl = LLVMConstShl; + extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constLShr = LLVMConstLShr; + extern fn LLVMConstLShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + + pub const constAShr = LLVMConstAShr; + extern fn LLVMConstAShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constTrunc = LLVMConstTrunc; extern fn LLVMConstTrunc(ConstantVal: *Value, ToType: *Type) *Value; @@ -204,41 +264,35 @@ pub const Value = opaque { pub const constBitCast = LLVMConstBitCast; extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value; - pub const constZExtOrBitCast = LLVMConstZExtOrBitCast; - extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value; - - pub const constNot = LLVMConstNot; - extern fn LLVMConstNot(ConstantVal: *Value) *Value; - - pub const constAdd = LLVMConstAdd; - extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; - - pub const constSub = LLVMConstSub; - extern fn LLVMConstSub(LHSConstant: *Value, RHSConstant: *Value) *Value; - - pub const constMul = LLVMConstMul; - extern fn LLVMConstMul(LHSConstant: *Value, RHSConstant: *Value) *Value; - - pub const constAnd = LLVMConstAnd; - extern fn LLVMConstAnd(LHSConstant: *Value, RHSConstant: *Value) *Value; - - pub const constOr = LLVMConstOr; - extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constAddrSpaceCast = LLVMConstAddrSpaceCast; + extern fn LLVMConstAddrSpaceCast(ConstantVal: *Value, ToType: *Type) *Value; - pub const constXor = LLVMConstXor; - extern fn LLVMConstXor(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constSelect = LLVMConstSelect; + extern fn LLVMConstSelect( + ConstantCondition: *Value, + ConstantIfTrue: *Value, + ConstantIfFalse: *Value, + ) *Value; - pub const constShl = LLVMConstShl; - extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constExtractElement = LLVMConstExtractElement; + extern fn LLVMConstExtractElement(VectorConstant: *Value, 
IndexConstant: *Value) *Value; - pub const constLShr = LLVMConstLShr; - extern fn LLVMConstLShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constInsertElement = LLVMConstInsertElement; + extern fn LLVMConstInsertElement( + VectorConstant: *Value, + ElementValueConstant: *Value, + IndexConstant: *Value, + ) *Value; - pub const constAShr = LLVMConstAShr; - extern fn LLVMConstAShr(LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const constShuffleVector = LLVMConstShuffleVector; + extern fn LLVMConstShuffleVector( + VectorAConstant: *Value, + VectorBConstant: *Value, + MaskConstant: *Value, + ) *Value; - pub const constAddrSpaceCast = LLVMConstAddrSpaceCast; - extern fn LLVMConstAddrSpaceCast(ConstantVal: *Value, ToType: *Type) *Value; + pub const blockAddress = LLVMBlockAddress; + extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value; pub const setWeak = LLVMSetWeak; extern fn LLVMSetWeak(CmpXchgInst: *Value, IsWeak: Bool) void; @@ -323,9 +377,6 @@ pub const Value = opaque { pub const attachMetaData = ZigLLVMAttachMetaData; extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void; - pub const blockAddress = LLVMBlockAddress; - extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value; - pub const dump = LLVMDumpValue; extern fn LLVMDumpValue(Val: *Value) void; }; @@ -522,15 +573,18 @@ pub const VerifierFailureAction = enum(c_int) { ReturnStatus, }; -pub const constNeg = LLVMConstNeg; -extern fn LLVMConstNeg(ConstantVal: *Value) *Value; - pub const constVector = LLVMConstVector; extern fn LLVMConstVector( ScalarConstantVals: [*]*Value, Size: c_uint, ) *Value; +pub const constICmp = LLVMConstICmp; +extern fn LLVMConstICmp(Predicate: IntPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value; + +pub const constFCmp = LLVMConstFCmp; +extern fn LLVMConstFCmp(Predicate: RealPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value; + pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName; extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint; -- cgit v1.2.3 From 8fde3a8f04e393b05879aff7afb9b09aadb4f2e1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 11 Jul 2023 23:02:18 -0400 Subject: llvm: finish converting constants --- src/codegen/llvm.zig | 359 +++++++++++++++++++------------------------ src/codegen/llvm/Builder.zig | 229 ++++++++++++++++++++------- 2 files changed, 333 insertions(+), 255 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 704b93cfa4..5a326d634e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -8,7 +8,10 @@ const native_endian = builtin.cpu.arch.endian(); const DW = std.dwarf; const Builder = @import("llvm/Builder.zig"); -const llvm = @import("llvm/bindings.zig"); +const llvm = if (build_options.have_llvm) + @import("llvm/bindings.zig") +else + @compileError("LLVM unavailable"); const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); @@ -577,7 +580,7 @@ pub const Object = struct { extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void), /// Memoizes a null `?usize` value. 
- null_opt_addr: ?*llvm.Value, + null_opt_usize: Builder.Constant, pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, Builder.Type); @@ -733,7 +736,7 @@ pub const Object = struct { .di_type_map = .{}, .error_name_table = .none, .extern_collisions = .{}, - .null_opt_addr = null, + .null_opt_usize = .no_init, }; } @@ -789,31 +792,31 @@ pub const Object = struct { const name = try o.builder.string(mod.intern_pool.stringToSlice(name_nts)); const str_init = try o.builder.stringNullConst(name); const str_ty = str_init.typeOf(&o.builder); - const str_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); - str_global.setInitializer(str_init.toLlvm(&o.builder)); - str_global.setLinkage(.Private); - str_global.setGlobalConstant(.True); - str_global.setUnnamedAddr(.True); - str_global.setAlignment(1); - - var global = Builder.Global{ + const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); + str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); + str_llvm_global.setLinkage(.Private); + str_llvm_global.setGlobalConstant(.True); + str_llvm_global.setUnnamedAddr(.True); + str_llvm_global.setAlignment(1); + + var str_global = Builder.Global{ .linkage = .private, .unnamed_addr = .unnamed_addr, .type = str_ty, .alignment = comptime Builder.Alignment.fromByteUnits(1), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - var variable = Builder.Variable{ + var str_variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = .constant, .init = str_init, }; - try o.builder.llvm.globals.append(o.gpa, str_global); - const str_global_index = try o.builder.addGlobal(.empty, global); - try o.builder.variables.append(o.gpa, variable); + try o.builder.llvm.globals.append(o.gpa, str_llvm_global); + const global_index = try o.builder.addGlobal(.empty, str_global); + try o.builder.variables.append(o.gpa, str_variable); llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{ - str_global_index.toConst(), + global_index.toConst(), try o.builder.intConst(llvm_usize_ty, name.toSlice(&o.builder).?.len), }); } @@ -2684,55 +2687,6 @@ pub const Object = struct { return buffer.toOwnedSliceSentinel(0); } - fn getNullOptAddr(o: *Object) !*llvm.Value { - if (o.null_opt_addr) |global| return global; - - const mod = o.module; - const target = mod.getTarget(); - const ty = try mod.intern(.{ .opt_type = .usize_type }); - - const llvm_init = try o.lowerValue(try mod.intern(.{ .opt = .{ - .ty = ty, - .val = .none, - } })); - const llvm_ty = llvm_init.typeOf(&o.builder); - const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const llvm_alignment = ty.toType().abiAlignment(mod); - const llvm_global = o.llvm_module.addGlobalInAddressSpace( - llvm_ty.toLlvm(&o.builder), - "", - @intFromEnum(llvm_actual_addrspace), - ); - llvm_global.setLinkage(.Internal); - llvm_global.setUnnamedAddr(.True); - llvm_global.setAlignment(llvm_alignment); - llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); - - var global = Builder.Global{ - .linkage = .internal, - .unnamed_addr = .unnamed_addr, - .type = llvm_ty, - .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .init = llvm_init, - }; - try o.builder.llvm.globals.append(o.gpa, llvm_global); - _ = try 
o.builder.addGlobal(.empty, global); - try o.builder.variables.append(o.gpa, variable); - - const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace) - llvm_global.constAddrSpaceCast((try o.builder.ptrType(llvm_wanted_addrspace)).toLlvm(&o.builder)) - else - llvm_global; - - o.null_opt_addr = addrspace_casted_global; - return addrspace_casted_global; - } - /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. @@ -3755,25 +3709,34 @@ pub const Object = struct { }, }, .vector_type => |vector_type| { - const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); - const vals = try allocator.alloc(Builder.Constant, vector_type.len); - defer allocator.free(vals); - + const vector_ty = try o.lowerType(ty); switch (aggregate.storage) { - .bytes => |bytes| for (vals, bytes) |*result_val, byte| { - result_val.* = try o.builder.intConst(.i8, byte); - }, - .elems => |elems| for (vals, elems) |*result_val, elem| { - result_val.* = try o.lowerValue(elem); + .bytes, .elems => { + const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, vector_type.len); + defer allocator.free(vals); + + switch (aggregate.storage) { + .bytes => |bytes| for (vals, bytes) |*result_val, byte| { + result_val.* = try o.builder.intConst(.i8, byte); + }, + .elems => |elems| for (vals, elems) |*result_val, elem| { + result_val.* = try o.lowerValue(elem); + }, + .repeated_elem => unreachable, + } + return o.builder.vectorConst(vector_ty, vals); }, - .repeated_elem => |elem| @memset(vals, try o.lowerValue(elem)), + .repeated_elem => |elem| return o.builder.splatConst( + vector_ty, + try o.lowerValue(elem), + ), } - return o.builder.vectorConst(try o.lowerType(ty), vals); }, .anon_struct_type => |tuple| { const struct_ty = try o.lowerType(ty); @@ -4209,14 +4172,11 @@ pub const Object = struct { else (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global; - const target = mod.getTarget(); - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) try o.builder.castConst( - .addrspacecast, + const llvm_val = try o.builder.convConst( + .unneeded, llvm_global.toConst(), - try o.builder.ptrType(llvm_wanted_addrspace), - ) else llvm_global.toConst(); + try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())), + ); return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) { .signed => .signed, @@ -4618,15 +4578,15 @@ pub const FuncGen = struct { .ty = self.typeOf(inst), .val = (try self.air.value(inst, mod)).?, }); - gop.value_ptr.* = llvm_val; - return llvm_val; + gop.value_ptr.* = llvm_val.toLlvm(&o.builder); + return gop.value_ptr.*; } - fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value { + fn resolveValue(self: *FuncGen, tv: 
TypedValue) Error!Builder.Constant { const o = self.dg.object; const mod = o.module; const llvm_val = try o.lowerValue(tv.val.toIntern()); - if (!isByRef(tv.ty, mod)) return llvm_val.toLlvm(&o.builder); + if (!isByRef(tv.ty, mod)) return llvm_val; // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. @@ -4645,6 +4605,7 @@ pub const FuncGen = struct { var global = Builder.Global{ .linkage = .private, .unnamed_addr = .unnamed_addr, + .addr_space = llvm_actual_addrspace, .type = llvm_ty, .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, @@ -4655,16 +4616,27 @@ pub const FuncGen = struct { .init = llvm_val, }; try o.builder.llvm.globals.append(o.gpa, llvm_global); - _ = try o.builder.addGlobal(.empty, global); + const global_index = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - llvm_global.constAddrSpaceCast( - (try o.builder.ptrType(llvm_wanted_addrspace)).toLlvm(&o.builder), - ) - else - llvm_global; - return addrspace_casted_ptr; + return try o.builder.convConst( + .unneeded, + global_index.toConst(), + try o.builder.ptrType(llvm_wanted_addrspace), + ); + } + + fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { + const o = self.dg.object; + const mod = o.module; + if (o.null_opt_usize == .no_init) { + const ty = try mod.intern(.{ .opt_type = .usize_type }); + o.null_opt_usize = try self.resolveValue(.{ + .ty = ty.toType(), + .val = (try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } })).toValue(), + }); + } + return o.null_opt_usize; } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { @@ -5243,7 +5215,7 @@ pub const FuncGen = struct { const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.ty.childType(mod).arrayLen(mod); const msg_ptr = try o.lowerValue(try msg_decl.internValue(mod)); - const null_opt_addr_global = try o.getNullOptAddr(); + const null_opt_addr_global = try fg.resolveNullOptUsize(); const target = mod.getTarget(); const llvm_usize = try o.lowerType(Type.usize); // example: @@ -5257,7 +5229,7 @@ pub const FuncGen = struct { msg_ptr.toLlvm(&o.builder), (try o.builder.intConst(llvm_usize, msg_len)).toLlvm(&o.builder), (try o.builder.nullConst(.ptr)).toLlvm(&o.builder), - null_opt_addr_global, + null_opt_addr_global.toLlvm(&o.builder), }; const panic_func = mod.funcInfo(mod.panic_func_index); const panic_decl = mod.declPtr(panic_func.owner_decl); @@ -6872,11 +6844,11 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); + const optional_llvm_ty = try o.lowerType(optional_ty); const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty, operand, "") + self.builder.buildLoad(optional_llvm_ty.toLlvm(&o.builder), operand, "") else operand; if (payload_ty.isSlice(mod)) { @@ -6887,21 +6859,21 @@ pub const FuncGen = struct { )); return self.builder.buildICmp(pred, slice_ptr, (try o.builder.nullConst(ptr_ty)).toLlvm(&o.builder), ""); } - return self.builder.buildICmp(pred, loaded, 
optional_llvm_ty.constNull(), ""); + return self.builder.buildICmp(pred, loaded, (try o.builder.zeroInitConst(optional_llvm_ty)).toLlvm(&o.builder), ""); } comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty, operand, "") + self.builder.buildLoad(optional_llvm_ty.toLlvm(&o.builder), operand, "") else operand; return self.builder.buildICmp(pred, loaded, (try o.builder.intConst(.i8, 0)).toLlvm(&o.builder), ""); } const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); - const non_null_bit = try self.optIsNonNull(optional_llvm_ty, operand, is_by_ref); + const non_null_bit = try self.optIsNonNull(optional_llvm_ty.toLlvm(&o.builder), operand, is_by_ref); if (pred == .EQ) { return self.builder.buildNot(non_null_bit, ""); } else { @@ -7549,24 +7521,18 @@ pub const FuncGen = struct { } if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try o.lowerType(inst_ty); - const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(mod); - - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer self.gpa.free(shifts); - - @memset(shifts, (try o.builder.intConst(try o.lowerType(scalar_ty), scalar_bit_size_minus_one)).toLlvm(&o.builder)); - break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else (try o.builder.intConst(inst_llvm_ty, scalar_bit_size_minus_one)).toLlvm(&o.builder); + const bit_size_minus_one = try o.builder.splatConst(inst_llvm_ty, try o.builder.intConst( + inst_llvm_ty.scalarType(&o.builder), + inst_llvm_ty.scalarBits(&o.builder) - 1, + )); const div = self.builder.buildSDiv(lhs, rhs, ""); const rem = self.builder.buildSRem(lhs, rhs, ""); const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); - const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); - const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero, ""); + const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one.toLlvm(&o.builder), ""); + const zero = try o.builder.zeroInitConst(inst_llvm_ty); + const rem_nonzero = self.builder.buildICmp(.NE, rem, zero.toLlvm(&o.builder), ""); + const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero.toLlvm(&o.builder), ""); return self.builder.buildNSWAdd(div, correction, ""); } return self.builder.buildUDiv(lhs, rhs, ""); @@ -7620,29 +7586,23 @@ pub const FuncGen = struct { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs }); const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs }); - const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); - const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); + const zero = try o.builder.zeroInitConst(inst_llvm_ty); + const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero.toLlvm(&o.builder) }); return self.builder.buildSelect(ltz, c, a, ""); } if (scalar_ty.isSignedInt(mod)) { - const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(mod); - - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer self.gpa.free(shifts); - - @memset(shifts, (try 
o.builder.intConst(try o.lowerType(scalar_ty), scalar_bit_size_minus_one)).toLlvm(&o.builder)); - break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else (try o.builder.intConst(inst_llvm_ty, scalar_bit_size_minus_one)).toLlvm(&o.builder); + const bit_size_minus_one = try o.builder.splatConst(inst_llvm_ty, try o.builder.intConst( + inst_llvm_ty.scalarType(&o.builder), + inst_llvm_ty.scalarBits(&o.builder) - 1, + )); const rem = self.builder.buildSRem(lhs, rhs, ""); const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); + const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one.toLlvm(&o.builder), ""); const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, ""); - const zero = inst_llvm_ty.toLlvm(&o.builder).constNull(); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); - const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero, ""); + const zero = try o.builder.zeroInitConst(inst_llvm_ty); + const rem_nonzero = self.builder.buildICmp(.NE, rem, zero.toLlvm(&o.builder), ""); + const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero.toLlvm(&o.builder), ""); return self.builder.buildNSWAdd(rem, correction, ""); } return self.builder.buildURem(lhs, rhs, ""); @@ -7953,17 +7913,17 @@ pub const FuncGen = struct { // In this case we can generate a softfloat negation by XORing the // bits with a constant. const int_ty = try o.builder.intType(@intCast(float_bits)); - const one = (try o.builder.intConst(int_ty, 1)).toLlvm(&o.builder); + const one = try o.builder.intConst(int_ty, 1); const shift_amt = try o.builder.intConst(int_ty, float_bits - 1); - const sign_mask = one.constShl(shift_amt.toLlvm(&o.builder)); + const sign_mask = try o.builder.binConst(.shl, one, shift_amt); const result = if (ty.zigTypeTag(mod) == .Vector) blk: { - const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, ""); + const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask.toLlvm(&o.builder), ""); const cast_ty = try o.builder.vectorType(.normal, ty.vectorLen(mod), int_ty); const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty.toLlvm(&o.builder), ""); break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); } else blk: { const bitcasted_operand = self.builder.buildBitCast(params[0], int_ty.toLlvm(&o.builder), ""); - break :blk self.builder.buildXor(bitcasted_operand, sign_mask, ""); + break :blk self.builder.buildXor(bitcasted_operand, sign_mask.toLlvm(&o.builder), ""); }; return self.builder.buildBitCast(result, llvm_ty.toLlvm(&o.builder), ""); }, @@ -8886,9 +8846,9 @@ pub const FuncGen = struct { const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte.toLlvm(&o.builder), len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = self.builder.buildMemSet(dest_ptr, fill_byte.toLlvm(&o.builder), len, dest_ptr_align, is_volatile); } return null; } @@ -8987,8 +8947,9 @@ pub const FuncGen = struct { dest_ptr_align: u32, is_volatile: bool, ) !void { - const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth()); - const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq); + const o = 
self.dg.object; + const llvm_usize_ty = try o.lowerType(Type.usize); + const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); const memset_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapSkip"); const end_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapEnd"); _ = self.builder.buildCondBr(cond, memset_block, end_block); @@ -9020,8 +8981,8 @@ pub const FuncGen = struct { std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and dest_ptr_ty.isSlice(mod)) { - const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth()); - const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq); + const llvm_usize_ty = try o.lowerType(Type.usize); + const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); const memcpy_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapSkip"); const end_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapEnd"); _ = self.builder.buildCondBr(cond, memcpy_block, end_block); @@ -9183,19 +9144,13 @@ pub const FuncGen = struct { if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(mod); operand_llvm_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); + } else operand_llvm_ty = scalar_ty; - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer self.gpa.free(shifts); - @memset(shifts, (try o.builder.intConst(scalar_ty, 8)).toLlvm(&o.builder)); - const shift_vec = llvm.constVector(shifts.ptr, vec_len); + const shift_amt = + try o.builder.splatConst(operand_llvm_ty, try o.builder.intConst(scalar_ty, 8)); + const extended = self.builder.buildZExt(operand, operand_llvm_ty.toLlvm(&o.builder), ""); + operand = self.builder.buildShl(extended, shift_amt.toLlvm(&o.builder), ""); - const extended = self.builder.buildZExt(operand, operand_llvm_ty.toLlvm(&o.builder), ""); - operand = self.builder.buildShl(extended, shift_vec, ""); - } else { - const extended = self.builder.buildZExt(operand, scalar_ty.toLlvm(&o.builder), ""); - operand = self.builder.buildShl(extended, (try o.builder.intConst(scalar_ty, 8)).toLlvm(&o.builder), ""); - operand_llvm_ty = scalar_ty; - } bits = bits + 8; } @@ -9358,11 +9313,8 @@ pub const FuncGen = struct { const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); - const slice_ty = Type.slice_const_u8_sentinel_0; - const ret_ty = try o.lowerType(slice_ty); - const llvm_ret_ty = ret_ty.toLlvm(&o.builder); + const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); const usize_ty = try o.lowerType(Type.usize); - const slice_alignment = slice_ty.abiAlignment(mod); const fn_type = try o.builder.fnType(ret_ty, &.{ try o.lowerType(enum_type.tag_ty.toType()), @@ -9399,33 +9351,38 @@ pub const FuncGen = struct { const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); - const array_ptr_indices: [2]*llvm.Value = .{ - (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), - } ** 2; - for (enum_type.names, 0..) 
|name_ip, field_index_usize| { const field_index = @as(u32, @intCast(field_index_usize)); - const name = mod.intern_pool.stringToSlice(name_ip); - const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); - const str_init_llvm_ty = str_init.typeOf(); - const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, ""); - str_global.setInitializer(str_init); - str_global.setLinkage(.Private); - str_global.setGlobalConstant(.True); - str_global.setUnnamedAddr(.True); - str_global.setAlignment(1); - - const slice_fields = [_]*llvm.Value{ - str_init_llvm_ty.constInBoundsGEP(str_global, &array_ptr_indices, array_ptr_indices.len), - (try o.builder.intConst(usize_ty, name.len)).toLlvm(&o.builder), + const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip)); + const str_init = try o.builder.stringNullConst(name); + const str_ty = str_init.typeOf(&o.builder); + const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); + str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); + str_llvm_global.setLinkage(.Private); + str_llvm_global.setGlobalConstant(.True); + str_llvm_global.setUnnamedAddr(.True); + str_llvm_global.setAlignment(1); + + var str_global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = str_ty, + .alignment = comptime Builder.Alignment.fromByteUnits(1), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - const slice_init = llvm_ret_ty.constNamedStruct(&slice_fields, slice_fields.len); - const slice_global = o.llvm_module.addGlobal(slice_init.typeOf(), ""); - slice_global.setInitializer(slice_init); - slice_global.setLinkage(.Private); - slice_global.setGlobalConstant(.True); - slice_global.setUnnamedAddr(.True); - slice_global.setAlignment(slice_alignment); + var str_variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = str_init, + }; + try o.builder.llvm.globals.append(o.gpa, str_llvm_global); + const global_index = try o.builder.addGlobal(.empty, str_global); + try o.builder.variables.append(o.gpa, str_variable); + + const slice_val = try o.builder.structConst(ret_ty, &.{ + global_index.toConst(), + try o.builder.intConst(usize_ty, name.toSlice(&o.builder).?.len), + }); const return_block = self.context.appendBasicBlock(fn_val, "Name"); const this_tag_int_value = @@ -9433,9 +9390,7 @@ pub const FuncGen = struct { switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block); self.builder.positionBuilderAtEnd(return_block); - const loaded = self.builder.buildLoad(llvm_ret_ty, slice_global, ""); - loaded.setAlignment(slice_alignment); - _ = self.builder.buildRet(loaded); + _ = self.builder.buildRet(slice_val.toLlvm(&o.builder)); } self.builder.positionBuilderAtEnd(bad_value_block); @@ -9530,22 +9485,25 @@ pub const FuncGen = struct { // when changing code, so Zig uses negative numbers to index the // second vector. These start at -1 and go down, and are easiest to use // with the ~ operator. Here we convert between the two formats. - const values = try self.gpa.alloc(*llvm.Value, mask_len); + const values = try self.gpa.alloc(Builder.Constant, mask_len); defer self.gpa.free(values); for (values, 0..) 
|*val, i| { const elem = try mask.elemValue(mod, i); if (elem.isUndef(mod)) { - val.* = Builder.Type.i32.toLlvm(&o.builder).getUndef(); + val.* = try o.builder.undefConst(.i32); } else { const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); - val.* = (try o.builder.intConst(.i32, unsigned)).toLlvm(&o.builder); + val.* = try o.builder.intConst(.i32, unsigned); } } - const llvm_mask_value = llvm.constVector(values.ptr, mask_len); - return self.builder.buildShuffleVector(a, b, llvm_mask_value, ""); + const llvm_mask_value = try o.builder.vectorConst( + try o.builder.vectorType(.normal, mask_len, .i32), + values, + ); + return self.builder.buildShuffleVector(a, b, llvm_mask_value.toLlvm(&o.builder), ""); } /// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result. @@ -9816,7 +9774,7 @@ pub const FuncGen = struct { .val = sent_val, }); - try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic); + try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toLlvm(&o.builder), .NotAtomic); } return alloca_inst; @@ -10431,12 +10389,13 @@ pub const FuncGen = struct { else self.builder.buildBitCast(elem, value_bits_type.toLlvm(&o.builder), ""); - var mask_val = (try o.builder.intConst(value_bits_type, -1)).toLlvm(&o.builder); - mask_val = mask_val.constZExt(containing_int_ty.toLlvm(&o.builder)); - mask_val = mask_val.constShl(shift_amt.toLlvm(&o.builder)); - mask_val = mask_val.constNot(); + var mask_val = try o.builder.intConst(value_bits_type, -1); + mask_val = try o.builder.castConst(.zext, mask_val, containing_int_ty); + mask_val = try o.builder.binConst(.shl, mask_val, shift_amt); + mask_val = + try o.builder.binConst(.xor, mask_val, try o.builder.intConst(containing_int_ty, -1)); - const anded_containing_int = self.builder.buildAnd(containing_int, mask_val, ""); + const anded_containing_int = self.builder.buildAnd(containing_int, mask_val.toLlvm(&o.builder), ""); const extended_value = self.builder.buildZExt(value_bits, containing_int_ty.toLlvm(&o.builder), ""); const shifted_value = self.builder.buildShl(extended_value, shift_amt.toLlvm(&o.builder), ""); const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, ""); diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 2ed8633f59..09ed51315a 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -251,14 +251,41 @@ pub const Type = enum(u32) { }; } - pub fn isFn(self: Type, builder: *const Builder) bool { + pub fn isFloatingPoint(self: Type) bool { + return switch (self) { + .half, .bfloat, .float, .double, .fp128, .x86_fp80, .ppc_fp128 => true, + else => false, + }; + } + + pub fn isInteger(self: Type, builder: *const Builder) bool { + return switch (self) { + .i1, .i8, .i16, .i29, .i32, .i64, .i80, .i128 => true, + else => switch (self.tag(builder)) { + .integer => true, + else => false, + }, + }; + } + + pub fn isPointer(self: Type, builder: *const Builder) bool { + return switch (self) { + .ptr => true, + else => switch (self.tag(builder)) { + .pointer => true, + else => false, + }, + }; + } + + pub fn isFunction(self: Type, builder: *const Builder) bool { return switch (self.tag(builder)) { .function, .vararg_function => true, else => false, }; } - pub fn fnKind(self: Type, builder: *const Builder) Type.Function.Kind { + pub fn functionKind(self: Type, builder: *const Builder) Type.Function.Kind { return switch (self.tag(builder)) { .function => .normal, .vararg_function => 
.vararg, @@ -345,6 +372,20 @@ pub const Type = enum(u32) { }; } + pub fn scalarType(self: Type, builder: *const Builder) Type { + if (self.isFloatingPoint()) return self; + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .integer, + .pointer, + => self, + .vector, + .scalable_vector, + => builder.typeExtraData(Type.Vector, item.data).child, + else => unreachable, + }; + } + pub fn vectorLen(self: Type, builder: *const Builder) u32 { const item = builder.type_items.items[@intFromEnum(self)]; return switch (item.tag) { @@ -809,17 +850,17 @@ pub const Global = struct { } pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void { - try builder.ensureUnusedCapacityGlobal(new_name); + try builder.ensureUnusedGlobalCapacity(new_name); self.renameAssumeCapacity(new_name, builder); } pub fn takeName(self: Index, other: Index, builder: *Builder) Allocator.Error!void { - try builder.ensureUnusedCapacityGlobal(.empty); + try builder.ensureUnusedGlobalCapacity(.empty); self.takeNameAssumeCapacity(other, builder); } pub fn replace(self: Index, other: Index, builder: *Builder) Allocator.Error!void { - try builder.ensureUnusedCapacityGlobal(.empty); + try builder.ensureUnusedGlobalCapacity(.empty); self.replaceAssumeCapacity(other, builder); } @@ -1047,6 +1088,7 @@ pub const Constant = enum(u32) { string, string_null, vector, + splat, zeroinitializer, undef, poison, @@ -1126,6 +1168,11 @@ pub const Constant = enum(u32) { type: Type, }; + pub const Splat = extern struct { + type: Type, + value: Constant, + }; + pub const BlockAddress = extern struct { function: Function.Index, block: Function.Block.Index, @@ -1217,6 +1264,7 @@ pub const Constant = enum(u32) { .array, .vector, => builder.constantExtraData(Aggregate, item.data).type, + .splat => builder.constantExtraData(Splat, item.data).type, .string, .string_null, => builder.arrayTypeAssumeCapacity( @@ -1270,28 +1318,10 @@ pub const Constant = enum(u32) { }, .icmp, .fcmp => { const ty = builder.constantExtraData(Compare, item.data).lhs.typeOf(builder); - return switch (ty) { - .half, - .bfloat, - .float, - .double, - .fp128, - .x86_fp80, - .ppc_fp128, - .i1, - .i8, - .i16, - .i29, - .i32, - .i64, - .i80, - .i128, - => ty, - else => if (ty.isVector(builder)) switch (ty.vectorKind(builder)) { - inline else => |kind| builder - .vectorTypeAssumeCapacity(kind, ty.vectorLen(builder), .i1), - } else ty, - }; + return if (ty.isVector(builder)) switch (ty.vectorKind(builder)) { + inline else => |kind| builder + .vectorTypeAssumeCapacity(kind, ty.vectorLen(builder), .i1), + } else ty; }, .extractelement => builder.constantExtraData(ExtractElement, item.data) .arg.typeOf(builder).childType(builder), @@ -1479,7 +1509,6 @@ pub const Constant = enum(u32) { const len = extra.data.type.aggregateLen(data.builder); const vals: []const Constant = @ptrCast(data.builder.constant_extra.items[extra.end..][0..len]); - try writer.writeAll(switch (tag) { .structure => "{ ", .packed_structure => "<{ ", @@ -1499,6 +1528,16 @@ pub const Constant = enum(u32) { else => unreachable, }); }, + .splat => { + const extra = data.builder.constantExtraData(Splat, item.data); + const len = extra.type.vectorLen(data.builder); + try writer.writeByte('<'); + for (0..len) |index| { + if (index > 0) try writer.writeAll(", "); + try writer.print("{%}", .{extra.value.fmt(data.builder)}); + } + try writer.writeByte('>'); + }, inline .string, .string_null, => |tag| try writer.print("c{\"" ++ switch (tag) { @@ -2093,7 +2132,7 @@ pub fn 
namedTypeSetBody( pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { assert(!name.isAnon()); try self.ensureUnusedTypeCapacity(1, null, 0); - try self.ensureUnusedCapacityGlobal(name); + try self.ensureUnusedGlobalCapacity(name); return self.addGlobalAssumeCapacity(name, global); } @@ -2231,8 +2270,17 @@ pub fn vectorConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.E return self.vectorConstAssumeCapacity(ty, vals); } +pub fn splatConst(self: *Builder, ty: Type, val: Constant) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Splat, 0); + return self.splatConstAssumeCapacity(ty, val); +} + pub fn zeroInitConst(self: *Builder, ty: Type) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); + try self.constant_limbs.ensureUnusedCapacity( + self.gpa, + Constant.Integer.limbs + comptime std.math.big.int.calcLimbLen(0), + ); return self.zeroInitConstAssumeCapacity(ty); } @@ -2477,7 +2525,7 @@ fn isValidIdentifier(id: []const u8) bool { return true; } -fn ensureUnusedCapacityGlobal(self: *Builder, name: String) Allocator.Error!void { +fn ensureUnusedGlobalCapacity(self: *Builder, name: String) Allocator.Error!void { if (self.useLibLlvm()) try self.llvm.globals.ensureUnusedCapacity(self.gpa, 1); try self.string_map.ensureUnusedCapacity(self.gpa, 1); if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len + @@ -2571,6 +2619,7 @@ fn vectorTypeAssumeCapacity( len: u32, child: Type, ) Type { + assert(child.isFloatingPoint() or child.isInteger(self) or child.isPointer(self)); const tag: Type.Tag = switch (kind) { .normal => .vector, .scalable => .scalable_vector, @@ -3321,14 +3370,13 @@ fn vectorConstAssumeCapacity( ty: Type, vals: []const Constant, ) if (build_options.have_llvm) Allocator.Error!Constant else Constant { - if (std.debug.runtime_safety) { - const type_item = self.type_items.items[@intFromEnum(ty)]; - assert(type_item.tag == .vector); - const extra = self.typeExtraData(Type.Vector, type_item.data); - assert(extra.len == vals.len); - for (vals) |val| assert(extra.child == val.typeOf(self)); - } + assert(ty.isVector(self)); + assert(ty.vectorLen(self) == vals.len); + for (vals) |val| assert(ty.childType(self) == val.typeOf(self)); + for (vals[1..]) |val| { + if (vals[0] != val) break; + } else return self.splatConstAssumeCapacity(ty, vals[0]); for (vals) |val| { if (!val.isZeroInit(self)) break; } else return self.zeroInitConstAssumeCapacity(ty); @@ -3351,23 +3399,91 @@ fn vectorConstAssumeCapacity( return result.constant; } +fn splatConstAssumeCapacity( + self: *Builder, + ty: Type, + val: Constant, +) if (build_options.have_llvm) Allocator.Error!Constant else Constant { + assert(ty.scalarType(self) == val.typeOf(self)); + + if (!ty.isVector(self)) return val; + if (val.isZeroInit(self)) return self.zeroInitConstAssumeCapacity(ty); + + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.Splat) u32 { + return @truncate(std.hash.Wyhash.hash( + comptime std.hash.uint32(@intFromEnum(Constant.Tag.splat)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.Splat, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .splat) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Splat, rhs_data); 
+ return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.Splat{ .type = ty, .value = val }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .splat, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) { + const ExpectedContents = [expected_fields_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const llvm_vals = try allocator.alloc(*llvm.Value, ty.vectorLen(self)); + defer allocator.free(llvm_vals); + @memset(llvm_vals, val.toLlvm(self)); + + self.llvm.constants.appendAssumeCapacity( + llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)), + ); + } + } + return @enumFromInt(gop.index); +} + fn zeroInitConstAssumeCapacity(self: *Builder, ty: Type) Constant { - switch (self.type_items.items[@intFromEnum(ty)].tag) { - .simple, - .function, - .vararg_function, - .integer, - .pointer, - => unreachable, - .target, - .vector, - .scalable_vector, - .small_array, - .array, - .structure, - .packed_structure, - .named_structure, - => {}, + switch (ty) { + inline .half, + .bfloat, + .float, + .double, + .fp128, + .x86_fp80, + => |tag| return @field(Builder, @tagName(tag) ++ "ConstAssumeCapacity")(self, 0.0), + .ppc_fp128 => return self.ppc_fp128ConstAssumeCapacity(.{ 0.0, 0.0 }), + .token => return .none, + .i1 => return .false, + else => switch (self.type_items.items[@intFromEnum(ty)].tag) { + .simple, + .function, + .vararg_function, + => unreachable, + .integer => { + var limbs: [std.math.big.int.calcLimbLen(0)]std.math.big.Limb = undefined; + const bigint = std.math.big.int.Mutable.init(&limbs, 0); + return self.bigIntConstAssumeCapacity(ty, bigint.toConst()) catch unreachable; + }, + .pointer => return self.nullConstAssumeCapacity(ty), + .target, + .vector, + .scalable_vector, + .small_array, + .array, + .structure, + .packed_structure, + .named_structure, + => {}, + }, } const result = self.getOrPutConstantNoExtraAssumeCapacity( .{ .tag = .zeroinitializer, .data = @intFromEnum(ty) }, @@ -4034,7 +4150,10 @@ pub inline fn useLibLlvm(self: *const Builder) bool { const assert = std.debug.assert; const build_options = @import("build_options"); const builtin = @import("builtin"); -const llvm = @import("bindings.zig"); +const llvm = if (build_options.have_llvm) + @import("bindings.zig") +else + @compileError("LLVM unavailable"); const log = std.log.scoped(.llvm); const std = @import("std"); -- cgit v1.2.3 From 3f46e747a8adc61d0274c1cafa8abe8d02e6b10b Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 13 Jul 2023 05:02:27 -0400 Subject: llvm: convert basic block creation --- src/codegen/llvm.zig | 449 +++++++++++++++++++++--------------------- src/codegen/llvm/Builder.zig | 374 +++++++++++++++++++++++++++-------- src/codegen/llvm/bindings.zig | 2 +- 3 files changed, 516 insertions(+), 309 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5a326d634e..8bf37cafaf 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -602,6 +602,7 @@ pub const Object = struct { var builder = try Builder.init(.{ .allocator = gpa, .use_lib_llvm = options.use_lib_llvm, + .strip = options.strip, .name = options.root_name, .target = options.target, .triple = llvm_target_triple, @@ -682,7 +683,7 @@ pub const Object = struct { 
target_machine = llvm.TargetMachine.create( builder.llvm.target.?, - builder.target_triple.toSlice(&builder).?.ptr, + builder.target_triple.toSlice(&builder).?, if (options.target.cpu.model.llvm_name) |s| s.ptr else null, options.llvm_cpu_features, opt_level, @@ -749,7 +750,6 @@ pub const Object = struct { self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); self.extern_collisions.deinit(gpa); - self.builder.deinit(); self.* = undefined; } @@ -850,22 +850,19 @@ pub const Object = struct { error_name_table_ptr_global.toLlvm(&o.builder).setInitializer(error_name_table_ptr); } - fn genCmpLtErrorsLenFunction(object: *Object) !void { + fn genCmpLtErrorsLenFunction(o: *Object) !void { // If there is no such function in the module, it means the source code does not need it. - const llvm_fn = object.llvm_module.getNamedFunction(lt_errors_fn_name) orelse return; - const mod = object.module; + const name = o.builder.stringIfExists(lt_errors_fn_name) orelse return; + const llvm_fn = o.builder.getGlobal(name) orelse return; + const mod = o.module; const errors_len = mod.global_error_set.count(); - // Delete previous implementation. We replace it with every flush() because the - // total number of errors may have changed. - while (llvm_fn.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); - } + var wip = Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function); + defer wip.deinit(); - const builder = object.context.createBuilder(); - - const entry_block = object.context.appendBasicBlock(llvm_fn, "Entry"); - builder.positionBuilderAtEnd(entry_block); + const builder = wip.llvm.builder; + const entry_block = try wip.block("Entry"); + builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); builder.clearCurrentDebugLocation(); // Example source of the following LLVM IR: @@ -873,10 +870,12 @@ pub const Object = struct { // return index < total_errors_len; // } - const lhs = llvm_fn.getParam(0); - const rhs = try object.builder.intConst(Builder.Type.err_int, errors_len); - const is_lt = builder.buildICmp(.ULT, lhs, rhs.toLlvm(&object.builder), ""); + const lhs = llvm_fn.toLlvm(&o.builder).getParam(0); + const rhs = try o.builder.intConst(Builder.Type.err_int, errors_len); + const is_lt = builder.buildICmp(.ULT, lhs, rhs.toLlvm(&o.builder), ""); _ = builder.buildRet(is_lt); + + try wip.finish(); } fn genModuleLevelAssembly(object: *Object) !void { @@ -1158,11 +1157,14 @@ pub const Object = struct { bb.deleteBasicBlock(); } - const builder = o.context.createBuilder(); + var deinit_wip = true; + var wip = Builder.WipFunction.init(&o.builder, function); + defer if (deinit_wip) wip.deinit(); - function.ptr(&o.builder).body = {}; - const entry_block = o.context.appendBasicBlock(llvm_func, "Entry"); - builder.positionBuilderAtEnd(entry_block); + const builder = wip.llvm.builder; + const entry_block = try wip.block("Entry"); + wip.cursor = .{ .block = entry_block }; + builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); // This gets the LLVM values from the function and stores them in `dg.args`. 
const fn_info = mod.typeToFunc(decl.ty).?; @@ -1260,8 +1262,7 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = (try o.builder.intType(@intCast(abi_size * 8))).toLlvm(&o.builder); + const int_llvm_ty = (try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8))).toLlvm(&o.builder); const alignment = @max( param_ty.abiAlignment(mod), o.target_data.abiAlignmentOfType(int_llvm_ty), @@ -1317,11 +1318,10 @@ pub const Object = struct { const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = (try o.builder.structType(.normal, field_types)).toLlvm(&o.builder); - for (0..field_types.len) |field_i_usize| { - const field_i = @as(c_uint, @intCast(field_i_usize)); + for (0..field_types.len) |field_i| { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); + const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, @intCast(field_i), ""); const store_inst = builder.buildStore(param, field_ptr); store_inst.setAlignment(target.ptrBitWidth() / 8); } @@ -1422,6 +1422,7 @@ pub const Object = struct { .liveness = liveness, .context = o.context, .dg = &dg, + .wip = wip, .builder = builder, .ret_ptr = ret_ptr, .args = args.items, @@ -1438,6 +1439,7 @@ pub const Object = struct { .err_ret_trace = err_ret_trace, }; defer fg.deinit(); + deinit_wip = false; fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { @@ -1449,6 +1451,8 @@ pub const Object = struct { else => |e| return e, }; + try fg.wip.finish(); + try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } @@ -1517,11 +1521,11 @@ pub const Object = struct { if (self.di_map.get(decl)) |di_node| { const decl_name_slice = decl_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { - const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); + const di_func: *llvm.DISubprogram = @ptrCast(di_node); const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); + const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); di_global.replaceLinkageName(linkage_name); } @@ -1554,11 +1558,11 @@ pub const Object = struct { if (self.di_map.get(decl)) |di_node| { const exp_name_slice = exp_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { - const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); + const di_func: *llvm.DISubprogram = @ptrCast(di_node); const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); + const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); di_global.replaceLinkageName(linkage_name); } @@ -1661,7 +1665,7 @@ pub const Object = struct { const gop = try o.di_map.getOrPut(gpa, file); errdefer assert(o.di_map.remove(file)); if (gop.found_existing) { - return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*)); + return 
@ptrCast(gop.value_ptr.*); } const dir_path_z = d: { var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined; @@ -1809,7 +1813,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, enumerators.ptr, - @as(c_int, @intCast(enumerators.len)), + @intCast(enumerators.len), try o.lowerDebugType(int_ty, .full), "", ); @@ -1984,7 +1988,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(mod), .full), - @as(i64, @intCast(ty.arrayLen(mod))), + @intCast(ty.arrayLen(mod)), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty)); @@ -2289,7 +2293,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang null, // vtable holder "", // unique id @@ -2376,7 +2380,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang null, // vtable holder "", // unique id @@ -2488,7 +2492,7 @@ pub const Object = struct { ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang "", // unique id ); @@ -2601,7 +2605,7 @@ pub const Object = struct { const fn_di_ty = dib.createSubroutineType( param_di_types.items.ptr, - @as(c_int, @intCast(param_di_types.items.len)), + @intCast(param_di_types.items.len), 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -4248,9 +4252,9 @@ pub const Object = struct { ) void { const llvm_attr = o.context.createStringAttribute( name.ptr, - @as(c_uint, @intCast(name.len)), + @intCast(name.len), value.ptr, - @as(c_uint, @intCast(value.len)), + @intCast(value.len), ); val.addAttributeAtIndex(index, llvm_attr); } @@ -4364,11 +4368,7 @@ pub const Object = struct { } const entry_block = llvm_func.getFirstBasicBlock().?; - if (entry_block.getFirstInstruction()) |first_inst| { - builder.positionBuilder(entry_block, first_inst); - } else { - builder.positionBuilderAtEnd(entry_block); - } + builder.positionBuilder(entry_block, entry_block.getFirstInstruction()); builder.clearCurrentDebugLocation(); break :blk builder.buildAllocaInAddressSpace(llvm_ty, @intFromEnum(address_space), ""); @@ -4502,6 +4502,7 @@ pub const FuncGen = struct { dg: *DeclGen, air: Air, liveness: Liveness, + wip: Builder.WipFunction, context: *llvm.Context, builder: *llvm.Builder, di_scope: ?*llvm.DIScope, @@ -4542,7 +4543,7 @@ pub const FuncGen = struct { /// This data structure is used to implement breaking to blocks. 
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { - parent_bb: *llvm.BasicBlock, + parent_bb: Builder.Function.Block.Index, breaks: *BreakList, }), @@ -4555,7 +4556,7 @@ pub const FuncGen = struct { }); fn deinit(self: *FuncGen) void { - self.builder.dispose(); + self.wip.deinit(); self.dbg_inlined.deinit(self.gpa); self.dbg_block_stack.deinit(self.gpa); self.func_inst_table.deinit(self.gpa); @@ -4898,7 +4899,7 @@ pub const FuncGen = struct { fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); + const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const o = self.dg.object; const mod = o.module; const ip = &mod.intern_pool; @@ -4987,8 +4988,7 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = (try o.builder.intType(@intCast(abi_size * 8))).toLlvm(&o.builder); + const int_llvm_ty = (try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8))).toLlvm(&o.builder); if (isByRef(param_ty, mod)) { const alignment = param_ty.abiAlignment(mod); @@ -5034,9 +5034,8 @@ pub const FuncGen = struct { const llvm_ty = (try o.builder.structType(.normal, llvm_types)).toLlvm(&o.builder); try llvm_args.ensureUnusedCapacity(it.types_len); - for (llvm_types, 0..) |field_ty, i_usize| { - const i = @as(c_uint, @intCast(i_usize)); - const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); + for (llvm_types, 0..) 
|field_ty, i| { + const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, @intCast(i), ""); const load_inst = self.builder.buildLoad(field_ty.toLlvm(&o.builder), field_ptr, ""); load_inst.setAlignment(target.ptrBitWidth() / 8); llvm_args.appendAssumeCapacity(load_inst); @@ -5091,7 +5090,7 @@ pub const FuncGen = struct { (try o.lowerType(zig_fn_ty)).toLlvm(&o.builder), llvm_fn, llvm_args.items.ptr, - @as(c_uint, @intCast(llvm_args.items.len)), + @intCast(llvm_args.items.len), toLlvmCallConv(fn_info.cc, target), attr, "", @@ -5256,7 +5255,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); - _ = self.builder.buildRetVoid(); + try self.wip.retVoid(); return null; } const fn_info = mod.typeToFunc(self.dg.decl.ty).?; @@ -5268,7 +5267,7 @@ pub const FuncGen = struct { const int = try o.builder.intConst(Builder.Type.err_int, 0); _ = self.builder.buildRet(int.toLlvm(&o.builder)); } else { - _ = self.builder.buildRetVoid(); + try self.wip.retVoid(); } return null; } @@ -5316,12 +5315,12 @@ pub const FuncGen = struct { const int = try o.builder.intConst(Builder.Type.err_int, 0); _ = self.builder.buildRet(int.toLlvm(&o.builder)); } else { - _ = self.builder.buildRetVoid(); + try self.wip.retVoid(); } return null; } if (self.ret_ptr != null) { - _ = self.builder.buildRetVoid(); + try self.wip.retVoid(); return null; } const ptr = try self.resolveInst(un_op); @@ -5476,33 +5475,33 @@ pub const FuncGen = struct { const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2.toLlvm(&o.builder), ""); const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, (try o.builder.intConst(llvm_i2, 1)).toLlvm(&o.builder), ""); const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, ""); - const both_null_block = self.context.appendBasicBlock(self.llvm_func, "BothNull"); - const mixed_block = self.context.appendBasicBlock(self.llvm_func, "Mixed"); - const both_pl_block = self.context.appendBasicBlock(self.llvm_func, "BothNonNull"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block, 2); + const both_null_block = try self.wip.block("BothNull"); + const mixed_block = try self.wip.block("Mixed"); + const both_pl_block = try self.wip.block("BothNonNull"); + const end_block = try self.wip.block("End"); + const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block.toLlvm(&self.wip), 2); const llvm_i2_00 = try o.builder.intConst(llvm_i2, 0b00); const llvm_i2_11 = try o.builder.intConst(llvm_i2, 0b11); - llvm_switch.addCase(llvm_i2_00.toLlvm(&o.builder), both_null_block); - llvm_switch.addCase(llvm_i2_11.toLlvm(&o.builder), both_pl_block); + llvm_switch.addCase(llvm_i2_00.toLlvm(&o.builder), both_null_block.toLlvm(&self.wip)); + llvm_switch.addCase(llvm_i2_11.toLlvm(&o.builder), both_pl_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(both_null_block); - _ = self.builder.buildBr(end_block); + self.builder.positionBuilderAtEnd(both_null_block.toLlvm(&self.wip)); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(mixed_block); - _ = self.builder.buildBr(end_block); + self.builder.positionBuilderAtEnd(mixed_block.toLlvm(&self.wip)); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(both_pl_block); + self.builder.positionBuilderAtEnd(both_pl_block.toLlvm(&self.wip)); 
const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true); const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true); const payload_cmp = try self.cmp(lhs_payload, rhs_payload, payload_ty, op); - _ = self.builder.buildBr(end_block); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); const both_pl_block_end = self.builder.getInsertBlock(); - self.builder.positionBuilderAtEnd(end_block); + self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_blocks: [3]*llvm.BasicBlock = .{ - both_null_block, - mixed_block, + both_null_block.toLlvm(&self.wip), + mixed_block.toLlvm(&self.wip), both_pl_block_end, }; const llvm_i1_0 = Builder.Constant.false.toLlvm(&o.builder); @@ -5552,7 +5551,6 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const inst_ty = self.typeOfIndex(inst); - const parent_bb = self.context.createBasicBlock("Block"); if (inst_ty.isNoReturn(mod)) { try self.genBody(body); @@ -5562,6 +5560,7 @@ pub const FuncGen = struct { var breaks: BreakList = .{}; defer breaks.deinit(self.gpa); + const parent_bb = try self.wip.block("Block"); try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .breaks = &breaks, @@ -5570,8 +5569,7 @@ pub const FuncGen = struct { try self.genBody(body); - self.llvm_func.appendExistingBasicBlock(parent_bb); - self.builder.positionBuilderAtEnd(parent_bb); + self.builder.positionBuilderAtEnd(parent_bb.toLlvm(&self.wip)); // Create a phi node only if the block returns a value. const is_body = inst_ty.zigTypeTag(mod) == .Fn; @@ -5585,7 +5583,7 @@ pub const FuncGen = struct { // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. if (is_body or isByRef(inst_ty, mod)) { - break :ty self.context.pointerType(0); + break :ty Builder.Type.ptr.toLlvm(&o.builder); } break :ty raw_llvm_ty; }; @@ -5594,7 +5592,7 @@ pub const FuncGen = struct { phi_node.addIncoming( breaks.items(.val).ptr, breaks.items(.bb).ptr, - @as(c_uint, @intCast(breaks.len)), + @intCast(breaks.len), ); return phi_node; } @@ -5617,7 +5615,7 @@ pub const FuncGen = struct { .val = val, }); } - _ = self.builder.buildBr(block.parent_bb); + _ = self.builder.buildBr(block.parent_bb.toLlvm(&self.wip)); return null; } @@ -5628,14 +5626,14 @@ pub const FuncGen = struct { const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const then_block = self.context.appendBasicBlock(self.llvm_func, "Then"); - const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); - _ = self.builder.buildCondBr(cond, then_block, else_block); + const then_block = try self.wip.block("Then"); + const else_block = try self.wip.block("Else"); + _ = self.builder.buildCondBr(cond, then_block.toLlvm(&self.wip), else_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(then_block); + self.builder.positionBuilderAtEnd(then_block.toLlvm(&self.wip)); try self.genBody(then_body); - self.builder.positionBuilderAtEnd(else_block); + self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); try self.genBody(else_body); // No need to reset the insert cursor since this instruction is noreturn. 
@@ -5707,14 +5705,14 @@ pub const FuncGen = struct { break :err fg.builder.buildICmp(.NE, loaded, zero, ""); }; - const return_block = fg.context.appendBasicBlock(fg.llvm_func, "TryRet"); - const continue_block = fg.context.appendBasicBlock(fg.llvm_func, "TryCont"); - _ = fg.builder.buildCondBr(is_err, return_block, continue_block); + const return_block = try fg.wip.block("TryRet"); + const continue_block = try fg.wip.block("TryCont"); + _ = fg.builder.buildCondBr(is_err, return_block.toLlvm(&fg.wip), continue_block.toLlvm(&fg.wip)); - fg.builder.positionBuilderAtEnd(return_block); + fg.builder.positionBuilderAtEnd(return_block.toLlvm(&fg.wip)); try fg.genBody(body); - fg.builder.positionBuilderAtEnd(continue_block); + fg.builder.positionBuilderAtEnd(continue_block.toLlvm(&fg.wip)); } if (is_unused) { return null; @@ -5745,24 +5743,24 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); + const else_block = try self.wip.block("Else"); const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) self.builder.buildPtrToInt(cond, llvm_usize, "") else cond; - const llvm_switch = self.builder.buildSwitch(cond_int, else_block, switch_br.data.cases_len); + const llvm_switch = self.builder.buildSwitch(cond_int, else_block.toLlvm(&self.wip), switch_br.data.cases_len); var extra_index: usize = switch_br.end; var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); + const items: []const Air.Inst.Ref = @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; - const case_block = self.context.appendBasicBlock(self.llvm_func, "Case"); + const case_block = try self.wip.block("Case"); for (items) |item| { const llvm_item = try self.resolveInst(item); @@ -5770,14 +5768,14 @@ pub const FuncGen = struct { llvm_item.constPtrToInt(llvm_usize) else llvm_item; - llvm_switch.addCase(llvm_int_item, case_block); + llvm_switch.addCase(llvm_int_item, case_block.toLlvm(&self.wip)); } - self.builder.positionBuilderAtEnd(case_block); + self.builder.positionBuilderAtEnd(case_block.toLlvm(&self.wip)); try self.genBody(case_body); } - self.builder.positionBuilderAtEnd(else_block); + self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; if (else_body.len != 0) { try self.genBody(else_body); @@ -5795,10 +5793,10 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop"); - _ = self.builder.buildBr(loop_block); + const loop_block = try self.wip.block("Loop"); + _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(loop_block); + self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); try self.genBody(body); // TODO instead 
of this logic, change AIR to have the property that @@ -5808,7 +5806,7 @@ pub const FuncGen = struct { // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { - _ = self.builder.buildBr(loop_block); + _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); } return null; } @@ -5858,8 +5856,7 @@ pub const FuncGen = struct { } } - const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod))); - const rt_int_bits = compilerRtIntBits(operand_bits); + const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); const rt_int_ty = try o.builder.intType(rt_int_bits); var extended = e: { if (operand_scalar_ty.isSignedInt(mod)) { @@ -6193,13 +6190,11 @@ pub const FuncGen = struct { const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); + const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); + const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); } @@ -6215,13 +6210,11 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); + const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); + const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); } @@ -6310,8 +6303,8 @@ pub const FuncGen = struct { fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { const di_scope = self.di_scope orelse return null; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1)); - self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1)); + self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1); + self.prev_dbg_column = @intCast(dbg_stmt.column + 1); const inlined_at = if 
(self.dbg_inlined.items.len > 0) self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc else @@ -6491,12 +6484,12 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; - const clobbers_len = @as(u31, @truncate(extra.data.flags)); + const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; - const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); + const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; - const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); + const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; var llvm_constraints: std.ArrayListUnmanaged(u8) = .{}; @@ -6779,7 +6772,7 @@ pub const FuncGen = struct { const llvm_fn_ty = llvm.functionType( ret_llvm_ty.toLlvm(&o.builder), llvm_param_types.ptr, - @as(c_uint, @intCast(param_count)), + @intCast(param_count), .False, ); const asm_fn = llvm.getInlineAsm( @@ -6797,7 +6790,7 @@ pub const FuncGen = struct { llvm_fn_ty, asm_fn, llvm_param_values.ptr, - @as(c_uint, @intCast(param_count)), + @intCast(param_count), .C, .Auto, "", @@ -6814,7 +6807,7 @@ pub const FuncGen = struct { if (llvm_ret_indirect[i]) continue; const output_value = if (return_count > 1) b: { - break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), ""); + break :b self.builder.buildExtractValue(call, @intCast(llvm_ret_i), ""); } else call; if (output != .none) { @@ -7364,14 +7357,14 @@ pub const FuncGen = struct { false => fg.builder.buildOrReduce(overflow_bit), }; - const fail_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowFail"); - const ok_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowOk"); - _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block, ok_block); + const fail_block = try fg.wip.block("OverflowFail"); + const ok_block = try fg.wip.block("OverflowOk"); + _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block.toLlvm(&fg.wip), ok_block.toLlvm(&fg.wip)); - fg.builder.positionBuilderAtEnd(fail_block); + fg.builder.positionBuilderAtEnd(fail_block.toLlvm(&fg.wip)); try fg.buildSimplePanic(.integer_overflow); - fg.builder.positionBuilderAtEnd(ok_block); + fg.builder.positionBuilderAtEnd(ok_block.toLlvm(&fg.wip)); return fg.builder.buildExtractValue(result_struct, 0, ""); } @@ -7729,8 +7722,7 @@ pub const FuncGen = struct { vector_len: usize, ) !*llvm.Value { const o = self.dg.object; - const args_len = @as(c_uint, @intCast(args_vectors.len)); - assert(args_len <= 3); + assert(args_vectors.len <= 3); var i: usize = 0; var result = result_vector; @@ -7741,7 +7733,7 @@ pub const FuncGen = struct { for (args_vectors, 0..) 
|arg_vector, k| { args[k] = self.builder.buildExtractElement(arg_vector, index_i32, ""); } - const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, ""); + const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, @intCast(args_vectors.len), .C, .Auto, ""); result = self.builder.buildInsertElement(result, result_elem, index_i32, ""); } return result; @@ -8744,8 +8736,8 @@ pub const FuncGen = struct { return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const abi_ty = try o.getAtomicAbiType(elem_ty, false); - const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse - ptr_info.child.toType().abiAlignment(mod))); + const ptr_alignment: u32 = @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse + ptr_info.child.toType().abiAlignment(mod)); const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); @@ -8887,9 +8879,9 @@ pub const FuncGen = struct { // end: // ... const entry_block = self.builder.getInsertBlock(); - const loop_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetLoop"); - const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); + const loop_block = try self.wip.block("InlineMemsetLoop"); + const body_block = try self.wip.block("InlineMemsetBody"); + const end_block = try self.wip.block("InlineMemsetEnd"); const usize_ty = try o.lowerType(Type.usize); const len = switch (ptr_ty.ptrSize(mod)) { @@ -8900,14 +8892,14 @@ pub const FuncGen = struct { const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); const len_gep = [_]*llvm.Value{len}; const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); - _ = self.builder.buildBr(loop_block); + _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(loop_block); - const it_ptr = self.builder.buildPhi(self.context.pointerType(0), ""); + self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); + const it_ptr = self.builder.buildPhi(Builder.Type.ptr.toLlvm(&o.builder), ""); const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, ""); - _ = self.builder.buildCondBr(end, body_block, end_block); + _ = self.builder.buildCondBr(end, body_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(body_block); + self.builder.positionBuilderAtEnd(body_block.toLlvm(&self.wip)); const elem_abi_alignment = elem_ty.abiAlignment(mod); const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); if (isByRef(elem_ty, mod)) { @@ -8928,12 +8920,12 @@ pub const FuncGen = struct { (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), }; const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, ""); - _ = self.builder.buildBr(loop_block); + _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(end_block); + self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_values: [2]*llvm.Value = .{ next_ptr, dest_ptr }; - const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block, entry_block }; + const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block.toLlvm(&self.wip), entry_block }; it_ptr.addIncoming(&incoming_values, &incoming_blocks, 2); return null; @@ -8950,13 +8942,13 @@ pub 
const FuncGen = struct { const o = self.dg.object; const llvm_usize_ty = try o.lowerType(Type.usize); const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); - const memset_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapSkip"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapEnd"); - _ = self.builder.buildCondBr(cond, memset_block, end_block); - self.builder.positionBuilderAtEnd(memset_block); + const memset_block = try self.wip.block("MemsetTrapSkip"); + const end_block = try self.wip.block("MemsetTrapEnd"); + _ = self.builder.buildCondBr(cond, memset_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + self.builder.positionBuilderAtEnd(memset_block.toLlvm(&self.wip)); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); - _ = self.builder.buildBr(end_block); - self.builder.positionBuilderAtEnd(end_block); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); } fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -8983,10 +8975,10 @@ pub const FuncGen = struct { { const llvm_usize_ty = try o.lowerType(Type.usize); const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); - const memcpy_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapSkip"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapEnd"); - _ = self.builder.buildCondBr(cond, memcpy_block, end_block); - self.builder.positionBuilderAtEnd(memcpy_block); + const memcpy_block = try self.wip.block("MemcpyTrapSkip"); + const end_block = try self.wip.block("MemcpyTrapEnd"); + _ = self.builder.buildCondBr(cond, memcpy_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + self.builder.positionBuilderAtEnd(memcpy_block.toLlvm(&self.wip)); _ = self.builder.buildMemCpy( dest_ptr, dest_ptr_ty.ptrAlignment(mod), @@ -8995,8 +8987,8 @@ pub const FuncGen = struct { len, is_volatile, ); - _ = self.builder.buildBr(end_block); - self.builder.positionBuilderAtEnd(end_block); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); return null; } @@ -9179,31 +9171,31 @@ pub const FuncGen = struct { const error_set_ty = self.air.getRefType(ty_op.ty); const names = error_set_ty.errorSetNames(mod); - const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); - const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len))); + const valid_block = try self.wip.block("Valid"); + const invalid_block = try self.wip.block("Invalid"); + const end_block = try self.wip.block("End"); + const switch_instr = self.builder.buildSwitch(operand, invalid_block.toLlvm(&self.wip), @intCast(names.len)); for (names) |name| { - const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); + const err_int = mod.global_error_set.getIndex(name).?; const this_tag_int_value = try o.lowerValue((try mod.intValue(Type.err_int, err_int)).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), valid_block); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), valid_block.toLlvm(&self.wip)); } - 
self.builder.positionBuilderAtEnd(valid_block); - _ = self.builder.buildBr(end_block); + self.builder.positionBuilderAtEnd(valid_block.toLlvm(&self.wip)); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(invalid_block); - _ = self.builder.buildBr(end_block); + self.builder.positionBuilderAtEnd(invalid_block.toLlvm(&self.wip)); + _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); - self.builder.positionBuilderAtEnd(end_block); + self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_values: [2]*llvm.Value = .{ Builder.Constant.true.toLlvm(&o.builder), Builder.Constant.false.toLlvm(&o.builder), }; const incoming_blocks: [2]*llvm.BasicBlock = .{ - valid_block, invalid_block, + valid_block.toLlvm(&self.wip), invalid_block.toLlvm(&self.wip), }; const phi_node = self.builder.buildPhi(Builder.Type.i1.toLlvm(&o.builder), ""); phi_node.addIncoming(&incoming_values, &incoming_blocks, 2); @@ -9250,8 +9242,11 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), - .body = {}, }; + try o.builder.llvm.globals.append(self.gpa, fn_val); + _ = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); + gop.value_ptr.* = global.kind.function; const prev_block = self.builder.getInsertBlock(); const prev_debug_location = self.builder.getCurrentDebugLocation2(); @@ -9262,31 +9257,30 @@ pub const FuncGen = struct { } } - const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); - self.builder.positionBuilderAtEnd(entry_block); + var wip = Builder.WipFunction.init(&o.builder, global.kind.function); + defer wip.deinit(); + + const entry_block = try wip.block("Entry"); + self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); self.builder.clearCurrentDebugLocation(); - const named_block = self.context.appendBasicBlock(fn_val, "Named"); - const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed"); + const named_block = try wip.block("Named"); + const unnamed_block = try wip.block("Unnamed"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len))); + const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block.toLlvm(&wip), @intCast(enum_type.names.len)); - for (enum_type.names, 0..) 
|_, field_index_usize| { - const field_index = @as(u32, @intCast(field_index_usize)); + for (0..enum_type.names.len) |field_index| { const this_tag_int_value = - try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), named_block); + try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern()); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), named_block.toLlvm(&wip)); } - self.builder.positionBuilderAtEnd(named_block); + self.builder.positionBuilderAtEnd(named_block.toLlvm(&wip)); _ = self.builder.buildRet(Builder.Constant.true.toLlvm(&o.builder)); - self.builder.positionBuilderAtEnd(unnamed_block); + self.builder.positionBuilderAtEnd(unnamed_block.toLlvm(&wip)); _ = self.builder.buildRet(Builder.Constant.false.toLlvm(&o.builder)); - try o.builder.llvm.globals.append(self.gpa, fn_val); - _ = try o.builder.addGlobal(llvm_fn_name, global); - try o.builder.functions.append(self.gpa, function); - gop.value_ptr.* = global.kind.function; + try wip.finish(); return fn_val; } @@ -9331,8 +9325,10 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), - .body = {}, }; + try o.builder.llvm.globals.append(self.gpa, fn_val); + gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); const prev_block = self.builder.getInsertBlock(); const prev_debug_location = self.builder.getCurrentDebugLocation2(); @@ -9343,16 +9339,18 @@ pub const FuncGen = struct { } } - const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); - self.builder.positionBuilderAtEnd(entry_block); + var wip = Builder.WipFunction.init(&o.builder, global.kind.function); + defer wip.deinit(); + + const entry_block = try wip.block("Entry"); + self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); self.builder.clearCurrentDebugLocation(); - const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue"); + const bad_value_block = try wip.block("BadValue"); const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); + const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block.toLlvm(&wip), @intCast(enum_type.names.len)); - for (enum_type.names, 0..) |name_ip, field_index_usize| { - const field_index = @as(u32, @intCast(field_index_usize)); + for (enum_type.names, 0..) 
|name_ip, field_index| { const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip)); const str_init = try o.builder.stringNullConst(name); const str_ty = str_init.typeOf(&o.builder); @@ -9384,35 +9382,32 @@ pub const FuncGen = struct { try o.builder.intConst(usize_ty, name.toSlice(&o.builder).?.len), }); - const return_block = self.context.appendBasicBlock(fn_val, "Name"); + const return_block = try wip.block("Name"); const this_tag_int_value = - try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, field_index)).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block); + try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern()); + switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block.toLlvm(&wip)); - self.builder.positionBuilderAtEnd(return_block); + self.builder.positionBuilderAtEnd(return_block.toLlvm(&wip)); _ = self.builder.buildRet(slice_val.toLlvm(&o.builder)); } - self.builder.positionBuilderAtEnd(bad_value_block); + self.builder.positionBuilderAtEnd(bad_value_block.toLlvm(&wip)); _ = self.builder.buildUnreachable(); - try o.builder.llvm.globals.append(self.gpa, fn_val); - gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); - try o.builder.functions.append(self.gpa, function); + try wip.finish(); return fn_val; } fn getCmpLtErrorsLenFunction(self: *FuncGen) !*llvm.Value { const o = self.dg.object; - if (o.llvm_module.getNamedFunction(lt_errors_fn_name)) |llvm_fn| { - return llvm_fn; - } + const name = try o.builder.string(lt_errors_fn_name); + if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.toLlvm(&o.builder); // Function signature: fn (anyerror) bool const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal); - const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type.toLlvm(&o.builder)); + const llvm_fn = o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); llvm_fn.setLinkage(.Internal); llvm_fn.setFunctionCallConv(.Fast); @@ -9425,13 +9420,12 @@ pub const FuncGen = struct { }; var function = Builder.Function{ .global = @enumFromInt(o.builder.globals.count()), - .body = {}, }; try o.builder.llvm.globals.append(self.gpa, llvm_fn); - _ = try o.builder.addGlobal(try o.builder.string(lt_errors_fn_name), global); + const global_index = try o.builder.addGlobal(name, global); try o.builder.functions.append(self.gpa, function); - return llvm_fn; + return global_index.toLlvm(&o.builder); } fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { @@ -9494,7 +9488,7 @@ pub const FuncGen = struct { val.* = try o.builder.undefConst(.i32); } else { const int = elem.toSignedInt(mod); - const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); + const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len); val.* = try o.builder.intConst(.i32, unsigned); } } @@ -9537,21 +9531,21 @@ pub const FuncGen = struct { _ = self.builder.buildStore(accum_init, accum_ptr); // Setup the loop - const loop = self.context.appendBasicBlock(self.llvm_func, "ReduceLoop"); - const loop_exit = self.context.appendBasicBlock(self.llvm_func, "AfterReduce"); - _ = self.builder.buildBr(loop); + const loop = try self.wip.block("ReduceLoop"); + const loop_exit = try self.wip.block("AfterReduce"); + _ = self.builder.buildBr(loop.toLlvm(&self.wip)); { - self.builder.positionBuilderAtEnd(loop); + self.builder.positionBuilderAtEnd(loop.toLlvm(&self.wip)); // while (i < vec.len) const i = 
self.builder.buildLoad(usize_ty.toLlvm(&o.builder), i_ptr, ""); const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len.toLlvm(&o.builder), ""); - const loop_then = self.context.appendBasicBlock(self.llvm_func, "ReduceLoopThen"); + const loop_then = try self.wip.block("ReduceLoopThen"); - _ = self.builder.buildCondBr(cond, loop_then, loop_exit); + _ = self.builder.buildCondBr(cond, loop_then.toLlvm(&self.wip), loop_exit.toLlvm(&self.wip)); { - self.builder.positionBuilderAtEnd(loop_then); + self.builder.positionBuilderAtEnd(loop_then.toLlvm(&self.wip)); // accum = f(accum, vec[i]); const accum = self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); @@ -9563,11 +9557,11 @@ pub const FuncGen = struct { // i += 1 const new_i = self.builder.buildAdd(i, (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), ""); _ = self.builder.buildStore(new_i, i_ptr); - _ = self.builder.buildBr(loop); + _ = self.builder.buildBr(loop.toLlvm(&self.wip)); } } - self.builder.positionBuilderAtEnd(loop_exit); + self.builder.positionBuilderAtEnd(loop_exit.toLlvm(&self.wip)); return self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); } @@ -9656,8 +9650,8 @@ pub const FuncGen = struct { const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @as(usize, @intCast(result_ty.arrayLen(mod))); - const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); + const len: usize = @intCast(result_ty.arrayLen(mod)); + const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); switch (result_ty.zigTypeTag(mod)) { @@ -9685,8 +9679,8 @@ pub const FuncGen = struct { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); + const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod)); + const small_int_ty = (try o.builder.intType(ty_bit_size)).toLlvm(&o.builder); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -9798,8 +9792,7 @@ pub const FuncGen = struct { const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = (try o.builder.intType(@intCast(ty_bit_size))).toLlvm(&o.builder); + const small_int_ty = (try o.builder.intType(@intCast(field.ty.bitSize(mod)))).toLlvm(&o.builder); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else @@ -10273,8 +10266,8 @@ pub const FuncGen = struct { const elem_ty = info.child.toType(); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse - elem_ty.abiAlignment(mod))); + const ptr_alignment: u32 = @intCast(info.flags.alignment.toByteUnitsOptional() orelse + elem_ty.abiAlignment(mod)); const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); assert(info.flags.vector_index != .runtime); @@ -10306,7 +10299,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); 
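The airReduce hunk above is representative of how this first commit works everywhere: basic blocks are now created through the self-hosted WipFunction so the Builder learns the control-flow structure, while the instructions themselves are still emitted with the LLVM C API builder, so every block handle has to be translated back with toLlvm. Condensed sketch (assuming the surrounding FuncGen context, where o is self.dg.object and usize_ty, i_ptr, llvm_vector_len come from the enclosing function; the loop body is elided):

    const loop = try self.wip.block("ReduceLoop"); // tracked by the self-hosted builder
    const loop_exit = try self.wip.block("AfterReduce");
    _ = self.builder.buildBr(loop.toLlvm(&self.wip)); // branches still go through the C API

    self.builder.positionBuilderAtEnd(loop.toLlvm(&self.wip));
    const i = self.builder.buildLoad(usize_ty.toLlvm(&o.builder), i_ptr, "");
    const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len.toLlvm(&o.builder), "");
    const loop_then = try self.wip.block("ReduceLoopThen");
    _ = self.builder.buildCondBr(cond, loop_then.toLlvm(&self.wip), loop_exit.toLlvm(&self.wip));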
containing_int.setVolatile(ptr_volatile); - const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); + const elem_bits = ptr_ty.childType(mod).bitSize(mod); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); const shifted_value = self.builder.buildLShr(containing_int, shift_amt.toLlvm(&o.builder), ""); const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); @@ -10379,7 +10372,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); + const elem_bits = ptr_ty.childType(mod).bitSize(mod); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store @@ -10451,11 +10444,11 @@ pub const FuncGen = struct { if (!target_util.hasValgrindSupport(target)) return default_value; const llvm_usize = try o.lowerType(Type.usize); - const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); + const usize_alignment = Type.usize.abiSize(mod); const array_llvm_ty = (try o.builder.arrayType(6, llvm_usize)).toLlvm(&o.builder); const array_ptr = fg.valgrind_client_request_array orelse a: { - const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment); + const array_ptr = try fg.buildAlloca(array_llvm_ty, @intCast(usize_alignment)); fg.valgrind_client_request_array = array_ptr; break :a array_ptr; }; @@ -10467,7 +10460,7 @@ pub const FuncGen = struct { }; const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, ""); const store_inst = fg.builder.buildStore(elem, elem_ptr); - store_inst.setAlignment(usize_alignment); + store_inst.setAlignment(@intCast(usize_alignment)); } const arch_specific: struct { @@ -11642,23 +11635,23 @@ const AnnotatedDITypePtr = enum(usize) { fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); assert(@as(u1, @truncate(addr)) == 0); - return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1)); + return @enumFromInt(addr | 1); } fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); - return @as(AnnotatedDITypePtr, @enumFromInt(addr)); + return @enumFromInt(addr); } fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr { const addr = @intFromPtr(di_type); const bit = @intFromBool(resolve == .fwd); - return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit)); + return @enumFromInt(addr | bit); } fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType { const fixed_addr = @intFromEnum(self) & ~@as(usize, 1); - return @as(*llvm.DIType, @ptrFromInt(fixed_addr)); + return @ptrFromInt(fixed_addr); } fn isFwdOnly(self: AnnotatedDITypePtr) bool { diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 09ed51315a..69f4ab5d71 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1,5 +1,6 @@ gpa: Allocator, use_lib_llvm: bool, +strip: bool, llvm: if (build_options.have_llvm) struct { context: *llvm.Context, @@ -12,33 +13,33 @@ llvm: if (build_options.have_llvm) struct { constants: std.ArrayListUnmanaged(*llvm.Value) = .{}, } else void, -source_filename: String = .none, -data_layout: String = .none, -target_triple: String = .none, - -string_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, -string_bytes: 
std.ArrayListUnmanaged(u8) = .{}, -string_indices: std.ArrayListUnmanaged(u32) = .{}, - -types: std.AutoArrayHashMapUnmanaged(String, Type) = .{}, -next_unnamed_type: String = @enumFromInt(0), -next_unique_type_id: std.AutoHashMapUnmanaged(String, u32) = .{}, -type_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, -type_items: std.ArrayListUnmanaged(Type.Item) = .{}, -type_extra: std.ArrayListUnmanaged(u32) = .{}, - -globals: std.AutoArrayHashMapUnmanaged(String, Global) = .{}, -next_unnamed_global: String = @enumFromInt(0), -next_replaced_global: String = .none, -next_unique_global_id: std.AutoHashMapUnmanaged(String, u32) = .{}, -aliases: std.ArrayListUnmanaged(Alias) = .{}, -variables: std.ArrayListUnmanaged(Variable) = .{}, -functions: std.ArrayListUnmanaged(Function) = .{}, - -constant_map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, -constant_items: std.MultiArrayList(Constant.Item) = .{}, -constant_extra: std.ArrayListUnmanaged(u32) = .{}, -constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb) = .{}, +source_filename: String, +data_layout: String, +target_triple: String, + +string_map: std.AutoArrayHashMapUnmanaged(void, void), +string_bytes: std.ArrayListUnmanaged(u8), +string_indices: std.ArrayListUnmanaged(u32), + +types: std.AutoArrayHashMapUnmanaged(String, Type), +next_unnamed_type: String, +next_unique_type_id: std.AutoHashMapUnmanaged(String, u32), +type_map: std.AutoArrayHashMapUnmanaged(void, void), +type_items: std.ArrayListUnmanaged(Type.Item), +type_extra: std.ArrayListUnmanaged(u32), + +globals: std.AutoArrayHashMapUnmanaged(String, Global), +next_unnamed_global: String, +next_replaced_global: String, +next_unique_global_id: std.AutoHashMapUnmanaged(String, u32), +aliases: std.ArrayListUnmanaged(Alias), +variables: std.ArrayListUnmanaged(Variable), +functions: std.ArrayListUnmanaged(Function), + +constant_map: std.AutoArrayHashMapUnmanaged(void, void), +constant_items: std.MultiArrayList(Constant.Item), +constant_extra: std.ArrayListUnmanaged(u32), +constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb), pub const expected_fields_len = 32; pub const expected_gep_indices_len = 8; @@ -46,6 +47,7 @@ pub const expected_gep_indices_len = 8; pub const Options = struct { allocator: Allocator, use_lib_llvm: bool = false, + strip: bool = true, name: []const u8 = &.{}, target: std.Target = builtin.target, triple: []const u8 = &.{}, @@ -986,9 +988,11 @@ pub const Variable = struct { pub const Function = struct { global: Global.Index, - body: ?void = null, - instructions: std.ArrayListUnmanaged(Instruction) = .{}, - blocks: std.ArrayListUnmanaged(Block) = .{}, + blocks: []const Block = &.{}, + instructions: std.MultiArrayList(Instruction) = .{}, + names: ?[*]const String = null, + metadata: ?[*]const Metadata = null, + extra: []const u32 = &.{}, pub const Index = enum(u32) { none = std.math.maxInt(u32), @@ -1007,28 +1011,236 @@ pub const Function = struct { } }; + pub const Block = struct { + instruction: Instruction.Index, + + pub const Index = WipFunction.Block.Index; + }; + pub const Instruction = struct { tag: Tag, + data: u32, pub const Tag = enum { arg, block, + @"ret void", + ret, }; - pub const Index = enum(u32) { _ }; + pub const Index = enum(u32) { + _, + + pub fn name(self: Instruction.Index, function: *const Function) String { + return if (function.names) |names| + names[@intFromEnum(self)] + else + @enumFromInt(@intFromEnum(self)); + } + }; }; + pub fn deinit(self: *Function, gpa: Allocator) void { + gpa.free(self.extra); + if (self.metadata) 
|metadata| gpa.free(metadata[0..self.instructions.len]); + if (self.names) |names| gpa.free(names[0..self.instructions.len]); + self.instructions.deinit(gpa); + self.* = undefined; + } +}; + +pub const WipFunction = struct { + builder: *Builder, + function: Function.Index, + llvm: if (build_options.have_llvm) struct { + builder: *llvm.Builder, + blocks: std.ArrayListUnmanaged(*llvm.BasicBlock), + instructions: std.ArrayListUnmanaged(*llvm.Value), + } else void, + cursor: Cursor, + blocks: std.ArrayListUnmanaged(Block), + instructions: std.MultiArrayList(Instruction), + names: std.ArrayListUnmanaged(String), + metadata: std.ArrayListUnmanaged(Metadata), + extra: std.ArrayListUnmanaged(u32), + + pub const Cursor = struct { block: Block.Index, instruction: u32 = 0 }; + pub const Block = struct { - body: std.ArrayListUnmanaged(Instruction.Index) = .{}, + name: String, + incoming: u32, + instructions: std.ArrayListUnmanaged(Instruction.Index), - pub const Index = enum(u32) { _ }; + const Index = enum(u32) { + _, + + pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock { + assert(wip.builder.useLibLlvm()); + return wip.llvm.blocks.items[@intFromEnum(self)]; + } + }; }; - pub fn deinit(self: *Function, gpa: Allocator) void { - self.instructions.deinit(gpa); - self.blocks.deinit(gpa); + pub const Instruction = Function.Instruction; + + pub fn init(builder: *Builder, function: Function.Index) WipFunction { + if (builder.useLibLlvm()) { + const llvm_function = function.toLlvm(builder); + while (llvm_function.getFirstBasicBlock()) |bb| bb.deleteBasicBlock(); + } + return .{ + .builder = builder, + .function = function, + .llvm = if (builder.useLibLlvm()) .{ + .builder = builder.llvm.context.createBuilder(), + .blocks = .{}, + .instructions = .{}, + } else undefined, + .cursor = undefined, + .blocks = .{}, + .instructions = .{}, + .names = .{}, + .metadata = .{}, + .extra = .{}, + }; + } + + pub fn block(self: *WipFunction, name: []const u8) Allocator.Error!Block.Index { + try self.blocks.ensureUnusedCapacity(self.builder.gpa, 1); + if (self.builder.useLibLlvm()) try self.llvm.blocks.ensureUnusedCapacity(self.builder.gpa, 1); + + const index: Block.Index = @enumFromInt(self.blocks.items.len); + const final_name = if (self.builder.strip) .empty else try self.builder.string(name); + self.blocks.appendAssumeCapacity(.{ .name = final_name, .incoming = 0, .instructions = .{} }); + if (self.builder.useLibLlvm()) self.llvm.blocks.appendAssumeCapacity( + self.builder.llvm.context.appendBasicBlock( + self.function.toLlvm(self.builder), + final_name.toSlice(self.builder).?, + ), + ); + return index; + } + + pub fn retVoid(self: *WipFunction) Allocator.Error!void { + _ = try self.addInst(.{ .tag = .@"ret void", .data = undefined }, .none); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildRetVoid(), + ); + } + + pub fn finish(self: *WipFunction) Allocator.Error!void { + const gpa = self.builder.gpa; + const function = self.function.ptr(self.builder); + const final_instructions_len = self.blocks.items.len + self.instructions.len; + + const blocks = try gpa.alloc(Function.Block, self.blocks.items.len); + errdefer gpa.free(blocks); + + const instructions = try gpa.alloc(Instruction.Index, self.instructions.len); + defer gpa.free(instructions); + + const names = if (self.builder.strip) null else try gpa.alloc(String, final_instructions_len); + errdefer if (names) |new_names| gpa.free(new_names); + + const metadata = + if (self.builder.strip) null 
else try gpa.alloc(Metadata, final_instructions_len); + errdefer if (metadata) |new_metadata| gpa.free(new_metadata); + + gpa.free(function.blocks); + function.blocks = &.{}; + if (function.names) |old_names| gpa.free(old_names[0..function.instructions.len]); + function.names = null; + if (function.metadata) |old_metadata| gpa.free(old_metadata[0..function.instructions.len]); + function.metadata = null; + + function.instructions.shrinkRetainingCapacity(0); + try function.instructions.setCapacity(gpa, final_instructions_len); + errdefer function.instructions.shrinkRetainingCapacity(0); + + { + var final_instruction: Instruction.Index = @enumFromInt(0); + for (blocks, self.blocks.items) |*final_block, current_block| { + final_block.instruction = final_instruction; + final_instruction = @enumFromInt(@intFromEnum(final_instruction) + 1); + for (current_block.instructions.items) |instruction| { + instructions[@intFromEnum(instruction)] = final_instruction; + final_instruction = @enumFromInt(@intFromEnum(final_instruction) + 1); + } + } + } + + var next_name: String = @enumFromInt(0); + for (self.blocks.items) |current_block| { + const block_instruction: Instruction.Index = @enumFromInt(function.instructions.len); + function.instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = current_block.incoming, + }); + if (names) |new_names| + new_names[@intFromEnum(block_instruction)] = switch (current_block.name) { + .empty => name: { + const name = next_name; + next_name = @enumFromInt(@intFromEnum(name) + 1); + break :name name; + }, + else => |name| name, + }; + for (current_block.instructions.items) |instruction_index| { + var instruction = self.instructions.get(@intFromEnum(instruction_index)); + switch (instruction.tag) { + .block => unreachable, + .@"ret void" => {}, + else => unreachable, + } + function.instructions.appendAssumeCapacity(instruction); + } + } + + function.extra = try self.extra.toOwnedSlice(gpa); + function.blocks = blocks; + function.names = if (names) |new_names| new_names.ptr else null; + function.metadata = if (metadata) |new_metadata| new_metadata.ptr else null; + } + + pub fn deinit(self: *WipFunction) void { + self.extra.deinit(self.builder.gpa); + self.instructions.deinit(self.builder.gpa); + for (self.blocks.items) |*b| b.instructions.deinit(self.builder.gpa); + self.blocks.deinit(self.builder.gpa); + if (self.builder.useLibLlvm()) self.llvm.builder.dispose(); self.* = undefined; } + + fn addInst( + self: *WipFunction, + instruction: Instruction, + name: String, + ) Allocator.Error!Instruction.Index { + const block_instructions = &self.blocks.items[@intFromEnum(self.cursor.block)].instructions; + try self.instructions.ensureUnusedCapacity(self.builder.gpa, 1); + try self.names.ensureUnusedCapacity(self.builder.gpa, 1); + try block_instructions.ensureUnusedCapacity(self.builder.gpa, 1); + if (self.builder.useLibLlvm()) { + try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, 1); + + if (false) self.llvm.builder.positionBuilder( + self.cursor.block.toLlvm(self), + if (self.cursor.instruction < block_instructions.items.len) + self.llvm.instructions.items[ + @intFromEnum(block_instructions.items[self.cursor.instruction]) + ] + else + null, + ); + } + + const index: Instruction.Index = @enumFromInt(self.instructions.len); + self.instructions.appendAssumeCapacity(instruction); + self.names.appendAssumeCapacity(name); + block_instructions.insertAssumeCapacity(self.cursor.instruction, index); + self.cursor.instruction += 1; + return index; + } }; pub const 
FloatCondition = enum(u4) { @@ -1696,6 +1908,8 @@ pub const Value = enum(u32) { } }; +pub const Metadata = enum(u32) { _ }; + pub const InitError = error{ InvalidLlvmTriple, } || Allocator.Error; @@ -1704,7 +1918,37 @@ pub fn init(options: Options) InitError!Builder { var self = Builder{ .gpa = options.allocator, .use_lib_llvm = options.use_lib_llvm, + .strip = options.strip, + .llvm = undefined, + + .source_filename = .none, + .data_layout = .none, + .target_triple = .none, + + .string_map = .{}, + .string_bytes = .{}, + .string_indices = .{}, + + .types = .{}, + .next_unnamed_type = @enumFromInt(0), + .next_unique_type_id = .{}, + .type_map = .{}, + .type_items = .{}, + .type_extra = .{}, + + .globals = .{}, + .next_unnamed_global = @enumFromInt(0), + .next_replaced_global = .none, + .next_unique_global_id = .{}, + .aliases = .{}, + .variables = .{}, + .functions = .{}, + + .constant_map = .{}, + .constant_items = .{}, + .constant_extra = .{}, + .constant_limbs = .{}, }; if (self.useLibLlvm()) self.llvm = .{ .context = llvm.Context.create() }; errdefer self.deinit(); @@ -1726,7 +1970,7 @@ pub fn init(options: Options) InitError!Builder { var error_message: [*:0]const u8 = undefined; var target: *llvm.Target = undefined; if (llvm.Target.getFromTriple( - self.target_triple.toSlice(&self).?.ptr, + self.target_triple.toSlice(&self).?, &target, &error_message, ).toBool()) { @@ -1739,7 +1983,7 @@ pub fn init(options: Options) InitError!Builder { return InitError.InvalidLlvmTriple; } self.llvm.target = target; - self.llvm.module.?.setTarget(self.target_triple.toSlice(&self).?.ptr); + self.llvm.module.?.setTarget(self.target_triple.toSlice(&self).?); } } @@ -2448,7 +2692,7 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator try writer.print( \\{s}{}{}{}{} {} {}( , .{ - if (function.body) |_| "define" else "declare", + if (function.instructions.len > 0) "define" else "declare", global.linkage, global.preemption, global.visibility, @@ -2469,48 +2713,18 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator else => unreachable, } try writer.print("){}{}", .{ global.unnamed_addr, global.alignment }); - if (function.body) |_| { - try writer.writeAll(" {\n ret "); - void: { - try writer.print("{%}", .{switch (extra.data.ret) { - .void => |tag| { - try writer.writeAll(@tagName(tag)); - break :void; - }, - inline .half, - .bfloat, - .float, - .double, - .fp128, - .x86_fp80, - => |tag| try @field(Builder, @tagName(tag) ++ "Const")(self, 0.0), - .ppc_fp128 => try self.ppc_fp128Const(.{ 0.0, 0.0 }), - .x86_amx, - .x86_mmx, - .label, - .metadata, - => unreachable, - .token => Constant.none, - else => switch (extra.data.ret.tag(self)) { - .simple, - .function, - .vararg_function, - => unreachable, - .integer => try self.intConst(extra.data.ret, 0), - .pointer => try self.nullConst(extra.data.ret), - .target, - .vector, - .scalable_vector, - .small_array, - .array, - .structure, - .packed_structure, - .named_structure, - => try self.zeroInitConst(extra.data.ret), - }, - }.fmt(self)}); + if (function.instructions.len > 0) { + try writer.writeAll(" {\n"); + for (0..function.instructions.len) |index| { + const instruction_index: Function.Instruction.Index = @enumFromInt(index); + const instruction = function.instructions.get(index); + switch (instruction.tag) { + .block => try writer.print("{}:\n", .{instruction_index.name(&function).fmt(self)}), + .@"ret void" => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), + else => unreachable, + } } - try 
writer.writeAll("\n}"); + try writer.writeByte('}'); } try writer.writeAll("\n\n"); } diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index c3c471fc2e..adc6223830 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -621,7 +621,7 @@ pub const Builder = opaque { extern fn LLVMPositionBuilder( Builder: *Builder, Block: *BasicBlock, - Instr: *Value, + Instr: ?*Value, ) void; pub const positionBuilderAtEnd = LLVMPositionBuilderAtEnd; -- cgit v1.2.3 From ef84e869925d8a95e8e57895c421f398903b5f4f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 13 Jul 2023 20:12:23 -0400 Subject: llvm: convert cursor positioning --- src/codegen/llvm.zig | 50 ++++++++++++++++++++++++++++++++++++++------ src/codegen/llvm/Builder.zig | 3 ++- 2 files changed, 46 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8bf37cafaf..69985a0f73 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -862,6 +862,7 @@ pub const Object = struct { const builder = wip.llvm.builder; const entry_block = try wip.block("Entry"); + wip.cursor = .{ .block = entry_block }; builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); builder.clearCurrentDebugLocation(); @@ -1206,7 +1207,7 @@ pub const Object = struct { if (isByRef(param_ty, mod)) { const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = param.typeOf(); - const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); args.appendAssumeCapacity(arg_ptr); @@ -1267,7 +1268,7 @@ pub const Object = struct { param_ty.abiAlignment(mod), o.target_data.abiAlignmentOfType(int_llvm_ty), ); - const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); store_inst.setAlignment(alignment); @@ -1316,7 +1317,7 @@ pub const Object = struct { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); const param_alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, param_alignment, target); + const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = (try o.builder.structType(.normal, field_types)).toLlvm(&o.builder); for (0..field_types.len) |field_i| { const param = llvm_func.getParam(llvm_arg_i); @@ -1349,7 +1350,7 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty, mod)) { @@ -1367,7 +1368,7 @@ pub const Object = struct { llvm_arg_i += 1; const alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target); + const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); 
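For reference, the WipFunction.finish added in the previous patch flattens the per-block instruction lists into one packed array owned by Function: each basic block contributes a .block marker instruction whose data field carries its incoming-edge count, followed by that block's body, and Function.Block.instruction records the index of the marker. The names and metadata side tables run parallel to this array and are only allocated when the builder is not stripping. For the one-block stub the first patch can express, the packed form is roughly (illustrative layout, not literal dump output):

    // blocks       = [ .{ .instruction = 0 } ]                      // "Entry" starts at index 0
    // instructions = [ .{ .tag = .block,       .data = 0 },         // marker, 0 incoming edges
    //                  .{ .tag = .@"ret void", .data = undefined } ]
    //
    // dump() walks `instructions`: a .block tag prints as a label line, anything else
    // prints as an instruction, and `instructions.len > 0` is what now distinguishes a
    // definition ("define") from a declaration ("declare").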
_ = builder.buildStore(param, arg_ptr); if (isByRef(param_ty, mod)) { @@ -4348,6 +4349,7 @@ pub const Object = struct { fn buildAllocaInner( o: *Object, + wip: *Builder.WipFunction, builder: *llvm.Builder, llvm_func: *llvm.Value, di_scope_non_null: bool, @@ -4358,9 +4360,11 @@ pub const Object = struct { const address_space = llvmAllocaAddressSpace(target); const alloca = blk: { + const prev_cursor = wip.cursor; const prev_block = builder.getInsertBlock(); const prev_debug_location = builder.getCurrentDebugLocation2(); defer { + wip.cursor = prev_cursor; builder.positionBuilderAtEnd(prev_block); if (di_scope_non_null) { builder.setCurrentDebugLocation2(prev_debug_location); @@ -4368,6 +4372,7 @@ pub const Object = struct { } const entry_block = llvm_func.getFirstBasicBlock().?; + wip.cursor = .{ .block = .entry }; builder.positionBuilder(entry_block, entry_block.getFirstInstruction()); builder.clearCurrentDebugLocation(); @@ -5485,12 +5490,15 @@ pub const FuncGen = struct { llvm_switch.addCase(llvm_i2_00.toLlvm(&o.builder), both_null_block.toLlvm(&self.wip)); llvm_switch.addCase(llvm_i2_11.toLlvm(&o.builder), both_pl_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = both_null_block }; self.builder.positionBuilderAtEnd(both_null_block.toLlvm(&self.wip)); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = mixed_block }; self.builder.positionBuilderAtEnd(mixed_block.toLlvm(&self.wip)); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = both_pl_block }; self.builder.positionBuilderAtEnd(both_pl_block.toLlvm(&self.wip)); const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true); const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true); @@ -5498,6 +5506,7 @@ pub const FuncGen = struct { _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); const both_pl_block_end = self.builder.getInsertBlock(); + self.wip.cursor = .{ .block = end_block }; self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_blocks: [3]*llvm.BasicBlock = .{ both_null_block.toLlvm(&self.wip), @@ -5569,6 +5578,7 @@ pub const FuncGen = struct { try self.genBody(body); + self.wip.cursor = .{ .block = parent_bb }; self.builder.positionBuilderAtEnd(parent_bb.toLlvm(&self.wip)); // Create a phi node only if the block returns a value. 
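Every hunk in this patch is the same mechanical pairing: wherever FuncGen repositions the LLVM C API builder, it now also records that position in wip.cursor so that instructions later emitted through the self-hosted WipFunction land in the same place. Cursor is a block index plus an instruction offset defaulting to 0 (the front of the block; freshly created blocks are empty, so front and end coincide), and buildAllocaInner deliberately uses offset 0 of the newly named .entry block to keep allocas hoisted. With the LLVMPositionBuilder binding now accepting a null instruction, addInst can reposition the C builder from the cursor itself. The recurring two-line pairing looks like this (block and body names here are illustrative):

    self.wip.cursor = .{ .block = then_block };                      // self-hosted insertion point
    self.builder.positionBuilderAtEnd(then_block.toLlvm(&self.wip)); // LLVM C API insertion point
    try self.genBody(then_body);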
@@ -5630,9 +5640,11 @@ pub const FuncGen = struct { const else_block = try self.wip.block("Else"); _ = self.builder.buildCondBr(cond, then_block.toLlvm(&self.wip), else_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = then_block }; self.builder.positionBuilderAtEnd(then_block.toLlvm(&self.wip)); try self.genBody(then_body); + self.wip.cursor = .{ .block = else_block }; self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); try self.genBody(else_body); @@ -5709,9 +5721,11 @@ pub const FuncGen = struct { const continue_block = try fg.wip.block("TryCont"); _ = fg.builder.buildCondBr(is_err, return_block.toLlvm(&fg.wip), continue_block.toLlvm(&fg.wip)); + fg.wip.cursor = .{ .block = return_block }; fg.builder.positionBuilderAtEnd(return_block.toLlvm(&fg.wip)); try fg.genBody(body); + fg.wip.cursor = .{ .block = continue_block }; fg.builder.positionBuilderAtEnd(continue_block.toLlvm(&fg.wip)); } if (is_unused) { @@ -5771,10 +5785,12 @@ pub const FuncGen = struct { llvm_switch.addCase(llvm_int_item, case_block.toLlvm(&self.wip)); } + self.wip.cursor = .{ .block = case_block }; self.builder.positionBuilderAtEnd(case_block.toLlvm(&self.wip)); try self.genBody(case_body); } + self.wip.cursor = .{ .block = else_block }; self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; if (else_body.len != 0) { @@ -5796,6 +5812,7 @@ pub const FuncGen = struct { const loop_block = try self.wip.block("Loop"); _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = loop_block }; self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); try self.genBody(body); @@ -7361,9 +7378,11 @@ pub const FuncGen = struct { const ok_block = try fg.wip.block("OverflowOk"); _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block.toLlvm(&fg.wip), ok_block.toLlvm(&fg.wip)); + fg.wip.cursor = .{ .block = fail_block }; fg.builder.positionBuilderAtEnd(fail_block.toLlvm(&fg.wip)); try fg.buildSimplePanic(.integer_overflow); + fg.wip.cursor = .{ .block = ok_block }; fg.builder.positionBuilderAtEnd(ok_block.toLlvm(&fg.wip)); return fg.builder.buildExtractValue(result_struct, 0, ""); } @@ -8483,7 +8502,7 @@ pub const FuncGen = struct { const o = self.dg.object; const mod = o.module; const target = mod.getTarget(); - return o.buildAllocaInner(self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); + return o.buildAllocaInner(&self.wip, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { @@ -8894,11 +8913,13 @@ pub const FuncGen = struct { const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = loop_block }; self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); const it_ptr = self.builder.buildPhi(Builder.Type.ptr.toLlvm(&o.builder), ""); const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, ""); _ = self.builder.buildCondBr(end, body_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = body_block }; self.builder.positionBuilderAtEnd(body_block.toLlvm(&self.wip)); const elem_abi_alignment = elem_ty.abiAlignment(mod); const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); @@ -8922,6 +8943,7 @@ pub const FuncGen = struct { const next_ptr = 
self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, ""); _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = end_block }; self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_values: [2]*llvm.Value = .{ next_ptr, dest_ptr }; @@ -8945,9 +8967,11 @@ pub const FuncGen = struct { const memset_block = try self.wip.block("MemsetTrapSkip"); const end_block = try self.wip.block("MemsetTrapEnd"); _ = self.builder.buildCondBr(cond, memset_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = memset_block }; self.builder.positionBuilderAtEnd(memset_block.toLlvm(&self.wip)); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = end_block }; self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); } @@ -8978,6 +9002,7 @@ pub const FuncGen = struct { const memcpy_block = try self.wip.block("MemcpyTrapSkip"); const end_block = try self.wip.block("MemcpyTrapEnd"); _ = self.builder.buildCondBr(cond, memcpy_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = memcpy_block }; self.builder.positionBuilderAtEnd(memcpy_block.toLlvm(&self.wip)); _ = self.builder.buildMemCpy( dest_ptr, @@ -8988,6 +9013,7 @@ pub const FuncGen = struct { is_volatile, ); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = end_block }; self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); return null; } @@ -9182,12 +9208,15 @@ pub const FuncGen = struct { try o.lowerValue((try mod.intValue(Type.err_int, err_int)).toIntern()); switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), valid_block.toLlvm(&self.wip)); } + self.wip.cursor = .{ .block = valid_block }; self.builder.positionBuilderAtEnd(valid_block.toLlvm(&self.wip)); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = invalid_block }; self.builder.positionBuilderAtEnd(invalid_block.toLlvm(&self.wip)); _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + self.wip.cursor = .{ .block = end_block }; self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); const incoming_values: [2]*llvm.Value = .{ @@ -9261,6 +9290,7 @@ pub const FuncGen = struct { defer wip.deinit(); const entry_block = try wip.block("Entry"); + wip.cursor = .{ .block = entry_block }; self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); self.builder.clearCurrentDebugLocation(); @@ -9274,9 +9304,11 @@ pub const FuncGen = struct { try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern()); switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), named_block.toLlvm(&wip)); } + wip.cursor = .{ .block = named_block }; self.builder.positionBuilderAtEnd(named_block.toLlvm(&wip)); _ = self.builder.buildRet(Builder.Constant.true.toLlvm(&o.builder)); + wip.cursor = .{ .block = unnamed_block }; self.builder.positionBuilderAtEnd(unnamed_block.toLlvm(&wip)); _ = self.builder.buildRet(Builder.Constant.false.toLlvm(&o.builder)); @@ -9343,6 +9375,7 @@ pub const FuncGen = struct { defer wip.deinit(); const entry_block = try wip.block("Entry"); + wip.cursor = .{ .block = entry_block }; self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); self.builder.clearCurrentDebugLocation(); @@ -9387,10 +9420,12 @@ pub const FuncGen = struct { try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, 
@intCast(field_index))).toIntern()); switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block.toLlvm(&wip)); + wip.cursor = .{ .block = return_block }; self.builder.positionBuilderAtEnd(return_block.toLlvm(&wip)); _ = self.builder.buildRet(slice_val.toLlvm(&o.builder)); } + wip.cursor = .{ .block = bad_value_block }; self.builder.positionBuilderAtEnd(bad_value_block.toLlvm(&wip)); _ = self.builder.buildUnreachable(); @@ -9535,6 +9570,7 @@ pub const FuncGen = struct { const loop_exit = try self.wip.block("AfterReduce"); _ = self.builder.buildBr(loop.toLlvm(&self.wip)); { + self.wip.cursor = .{ .block = loop }; self.builder.positionBuilderAtEnd(loop.toLlvm(&self.wip)); // while (i < vec.len) @@ -9545,6 +9581,7 @@ pub const FuncGen = struct { _ = self.builder.buildCondBr(cond, loop_then.toLlvm(&self.wip), loop_exit.toLlvm(&self.wip)); { + self.wip.cursor = .{ .block = loop_then }; self.builder.positionBuilderAtEnd(loop_then.toLlvm(&self.wip)); // accum = f(accum, vec[i]); @@ -9561,6 +9598,7 @@ pub const FuncGen = struct { } } + self.wip.cursor = .{ .block = loop_exit }; self.builder.positionBuilderAtEnd(loop_exit.toLlvm(&self.wip)); return self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 69f4ab5d71..a7aeea1a45 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1072,6 +1072,7 @@ pub const WipFunction = struct { instructions: std.ArrayListUnmanaged(Instruction.Index), const Index = enum(u32) { + entry, _, pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock { @@ -1223,7 +1224,7 @@ pub const WipFunction = struct { if (self.builder.useLibLlvm()) { try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, 1); - if (false) self.llvm.builder.positionBuilder( + self.llvm.builder.positionBuilder( self.cursor.block.toLlvm(self), if (self.cursor.instruction < block_instructions.items.len) self.llvm.instructions.items[ -- cgit v1.2.3 From 9afb349abaeaf6470329ea8990eb06eb37dd79e1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 17 Jul 2023 11:38:46 -0400 Subject: llvm: convert most instructions --- src/codegen/llvm.zig | 4552 +++++++++++++++++++++-------------------- src/codegen/llvm/Builder.zig | 4069 +++++++++++++++++++++++++++++++++--- src/codegen/llvm/bindings.zig | 93 +- src/zig_llvm.cpp | 4 - 4 files changed, 6109 insertions(+), 2609 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 69985a0f73..7fe5dbb88f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -549,7 +549,6 @@ pub const Object = struct { /// - *Module.Decl (Non-Fn) => *DIGlobalVariable di_map: std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DINode), di_compile_unit: ?*llvm.DICompileUnit, - context: *llvm.Context, target_machine: *llvm.TargetMachine, target_data: *llvm.TargetData, target: std.Target, @@ -727,7 +726,6 @@ pub const Object = struct { .di_map = .{}, .di_builder = builder.llvm.di_builder, .di_compile_unit = builder.llvm.di_compile_unit, - .context = builder.llvm.context, .target_machine = target_machine, .target_data = target_data, .target = options.target, @@ -803,13 +801,13 @@ pub const Object = struct { .linkage = .private, .unnamed_addr = .unnamed_addr, .type = str_ty, - .alignment = comptime Builder.Alignment.fromByteUnits(1), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; var str_variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = 
.constant, .init = str_init, + .alignment = comptime Builder.Alignment.fromByteUnits(1), }; try o.builder.llvm.globals.append(o.gpa, str_llvm_global); const global_index = try o.builder.addGlobal(.empty, str_global); @@ -833,13 +831,13 @@ pub const Object = struct { .linkage = .private, .unnamed_addr = .unnamed_addr, .type = llvm_table_ty, - .alignment = Builder.Alignment.fromByteUnits(slice_alignment), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; var variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = .constant, .init = error_name_table_init, + .alignment = Builder.Alignment.fromByteUnits(slice_alignment), }; try o.builder.llvm.globals.append(o.gpa, error_name_table_global); _ = try o.builder.addGlobal(.empty, global); @@ -857,25 +855,19 @@ pub const Object = struct { const mod = o.module; const errors_len = mod.global_error_set.count(); - var wip = Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function); + var wip = try Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function); defer wip.deinit(); - - const builder = wip.llvm.builder; - const entry_block = try wip.block("Entry"); - wip.cursor = .{ .block = entry_block }; - builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); - builder.clearCurrentDebugLocation(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; // Example source of the following LLVM IR: // fn __zig_lt_errors_len(index: u16) bool { // return index < total_errors_len; // } - const lhs = llvm_fn.toLlvm(&o.builder).getParam(0); - const rhs = try o.builder.intConst(Builder.Type.err_int, errors_len); - const is_lt = builder.buildICmp(.ULT, lhs, rhs.toLlvm(&o.builder), ""); - _ = builder.buildRet(is_lt); - + const lhs = wip.arg(0); + const rhs = try o.builder.intValue(Builder.Type.err_int, errors_len); + const is_lt = try wip.icmp(.ult, lhs, rhs, ""); + _ = try wip.ret(is_lt); try wip.finish(); } @@ -1148,29 +1140,26 @@ pub const Object = struct { } if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| { - global.ptr(&o.builder).section = try o.builder.string(section); + function.ptr(&o.builder).section = try o.builder.string(section); llvm_func.setSection(section); } - // Remove all the basic blocks of a function in order to start over, generating - // LLVM IR from an empty function body. - while (llvm_func.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); - } - var deinit_wip = true; - var wip = Builder.WipFunction.init(&o.builder, function); + var wip = try Builder.WipFunction.init(&o.builder, function); defer if (deinit_wip) wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; const builder = wip.llvm.builder; - const entry_block = try wip.block("Entry"); - wip.cursor = .{ .block = entry_block }; - builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); + var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. 
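This third patch switches most instruction emission over to WipFunction itself: values are Builder.Value instead of *llvm.Value, WipFunction.init is now fallible, and wip.block takes the expected number of incoming edges. The rewritten __zig_lt_errors_len body above is the smallest complete example of the new shape; in isolation (errors_len is mod.global_error_set.count() from the surrounding function) it reads:

    var wip = try Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function);
    defer wip.deinit();
    wip.cursor = .{ .block = try wip.block(0, "Entry") }; // 0 = expected incoming edges

    // fn __zig_lt_errors_len(index: u16) bool { return index < total_errors_len; }
    const lhs = wip.arg(0);
    const rhs = try o.builder.intValue(Builder.Type.err_int, errors_len);
    const is_lt = try wip.icmp(.ult, lhs, rhs, "");
    _ = try wip.ret(is_lt);
    try wip.finish();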
const fn_info = mod.typeToFunc(decl.ty).?; const sret = firstParamSRet(fn_info, mod); - const ret_ptr = if (sret) llvm_func.getParam(0) else null; + const ret_ptr: Builder.Value = if (sret) param: { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + break :param param; + } else .none; const gpa = o.gpa; if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { @@ -1181,205 +1170,183 @@ pub const Object = struct { const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; - const err_ret_trace = if (err_return_tracing) - llvm_func.getParam(@intFromBool(ret_ptr != null)) - else - null; + const err_ret_trace: Builder.Value = if (err_return_tracing) param: { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + break :param param; + } else .none; // This is the list of args we will use that correspond directly to the AIR arg // instructions. Depending on the calling convention, this list is not necessarily // a bijection with the actual LLVM parameters of the function. - var args = std.ArrayList(*llvm.Value).init(gpa); - defer args.deinit(); + var args: std.ArrayListUnmanaged(Builder.Value) = .{}; + defer args.deinit(gpa); { - var llvm_arg_i = @as(c_uint, @intFromBool(ret_ptr != null)) + @intFromBool(err_return_tracing); var it = iterateParamTypes(o, fn_info); - while (try it.next()) |lowering| switch (lowering) { - .no_bits => continue, - .byval => { - assert(!it.byval_attr); - const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types.get(ip)[param_index].toType(); - const param = llvm_func.getParam(llvm_arg_i); - try args.ensureUnusedCapacity(1); - - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = param.typeOf(); - const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); - const store_inst = builder.buildStore(param, arg_ptr); - store_inst.setAlignment(alignment); - args.appendAssumeCapacity(arg_ptr); - } else { - args.appendAssumeCapacity(param); - - o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, llvm_arg_i); - } - llvm_arg_i += 1; - }, - .byref => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(mod); - - o.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); - llvm_arg_i += 1; - - try args.ensureUnusedCapacity(1); - - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(param); - } else { - const load_inst = builder.buildLoad(param_llvm_ty.toLlvm(&o.builder), param, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .byref_mut => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(mod); + while (try it.next()) |lowering| { + try args.ensureUnusedCapacity(gpa, 1); + + switch (lowering) { + .no_bits => continue, + .byval => { + assert(!it.byval_attr); + const param_index = it.zig_index - 1; + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); + const param = wip.arg(llvm_arg_i); + + if (isByRef(param_ty, mod)) { + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const param_llvm_ty 
= param.typeOfWip(&wip); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); + args.appendAssumeCapacity(arg_ptr); + } else { + args.appendAssumeCapacity(param); - o.addArgAttr(llvm_func, llvm_arg_i, "noundef"); - llvm_arg_i += 1; + o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, @intCast(llvm_arg_i)); + } + llvm_arg_i += 1; + }, + .byref => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); - try args.ensureUnusedCapacity(1); + o.addByRefParamAttrs(llvm_func, @intCast(llvm_arg_i), @intCast(alignment.toByteUnits() orelse 0), it.byval_attr, param_llvm_ty); + llvm_arg_i += 1; - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(param); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, param, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .abi_sized_int => { - assert(!it.byval_attr); - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + if (isByRef(param_ty, mod)) { + args.appendAssumeCapacity(param); + } else { + args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); + } + }, + .byref_mut => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const int_llvm_ty = (try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8))).toLlvm(&o.builder); - const alignment = @max( - param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty), - ); - const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); - const store_inst = builder.buildStore(param, arg_ptr); - store_inst.setAlignment(alignment); + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noundef"); + llvm_arg_i += 1; - try args.ensureUnusedCapacity(1); + if (isByRef(param_ty, mod)) { + args.appendAssumeCapacity(param); + } else { + args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); + } + }, + .abi_sized_int => { + assert(!it.byval_attr); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .slice => { - assert(!it.byval_attr); - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const ptr_info = param_ty.ptrInfo(mod); + const param_llvm_ty = try o.lowerType(param_ty); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); + const alignment = Builder.Alignment.fromByteUnits(@max( + param_ty.abiAlignment(mod), + o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), + )); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); - if (math.cast(u5, 
it.zig_index - 1)) |i| { - if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { - o.addArgAttr(llvm_func, llvm_arg_i, "noalias"); + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + .slice => { + assert(!it.byval_attr); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const ptr_info = param_ty.ptrInfo(mod); + + if (math.cast(u5, it.zig_index - 1)) |i| { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noalias"); + } + } + if (param_ty.zigTypeTag(mod) != .Optional) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "nonnull"); + } + if (ptr_info.flags.is_const) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "readonly"); + } + const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse + @max(ptr_info.child.toType().abiAlignment(mod), 1); + o.addArgAttrInt(llvm_func, @intCast(llvm_arg_i), "align", elem_align); + const ptr_param = wip.arg(llvm_arg_i + 0); + const len_param = wip.arg(llvm_arg_i + 1); + llvm_arg_i += 2; + + const slice_llvm_ty = try o.lowerType(param_ty); + args.appendAssumeCapacity( + try wip.buildAggregate(slice_llvm_ty, &.{ ptr_param, len_param }, ""), + ); + }, + .multiple_llvm_types => { + assert(!it.byval_attr); + const field_types = it.types_buffer[0..it.types_len]; + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param_alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target); + const llvm_ty = try o.builder.structType(.normal, field_types); + for (0..field_types.len) |field_i| { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + const field_ptr = try wip.gepStruct(llvm_ty, arg_ptr, field_i, ""); + const alignment = + Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + _ = try wip.store(.normal, param, field_ptr, alignment); } - } - if (param_ty.zigTypeTag(mod) != .Optional) { - o.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); - } - if (ptr_info.flags.is_const) { - o.addArgAttr(llvm_func, llvm_arg_i, "readonly"); - } - const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse - @max(ptr_info.child.toType().abiAlignment(mod), 1); - o.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align); - const ptr_param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - const len_param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - - const slice_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const partial = builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr_param, 0, ""); - const aggregate = builder.buildInsertValue(partial, len_param, 1, ""); - try args.append(aggregate); - }, - .multiple_llvm_types => { - assert(!it.byval_attr); - const field_types = it.types_buffer[0..it.types_len]; - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const param_alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, param_alignment, target); - const llvm_ty = (try o.builder.structType(.normal, field_types)).toLlvm(&o.builder); - for (0..field_types.len) |field_i| { - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - const field_ptr = 
builder.buildStructGEP(llvm_ty, arg_ptr, @intCast(field_i), ""); - const store_inst = builder.buildStore(param, field_ptr); - store_inst.setAlignment(target.ptrBitWidth() / 8); - } - const is_by_ref = isByRef(param_ty, mod); - const loaded = if (is_by_ref) arg_ptr else l: { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(param_alignment); - break :l load_inst; - }; - try args.append(loaded); - }, - .as_u16 => { - assert(!it.byval_attr); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - const casted = builder.buildBitCast(param, Builder.Type.half.toLlvm(&o.builder), ""); - try args.ensureUnusedCapacity(1); - args.appendAssumeCapacity(casted); - }, - .float_array => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + const is_by_ref = isByRef(param_ty, mod); + args.appendAssumeCapacity(if (is_by_ref) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, param_alignment, "")); + }, + .as_u16 => { + assert(!it.byval_attr); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + args.appendAssumeCapacity(try wip.cast(.bitcast, param, .half, "")); + }, + .float_array => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); - _ = builder.buildStore(param, arg_ptr); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); - if (isByRef(param_ty, mod)) { - try args.append(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - try args.append(load_inst); - } - }, - .i32_array, .i64_array => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + .i32_array, .i64_array => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(mod); - const arg_ptr = try o.buildAllocaInner(&wip, builder, llvm_func, false, param_llvm_ty, alignment, target); - _ = builder.buildStore(param, arg_ptr); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); - if (isByRef(param_ty, mod)) { - try args.append(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - try args.append(load_inst); - } - }, - }; + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + } + } } var di_file: 
?*llvm.DIFile = null; @@ -1421,7 +1388,6 @@ pub const Object = struct { .gpa = gpa, .air = air, .liveness = liveness, - .context = o.context, .dg = &dg, .wip = wip, .builder = builder, @@ -1429,9 +1395,8 @@ pub const Object = struct { .args = args.items, .arg_index = 0, .func_inst_table = .{}, - .llvm_func = llvm_func, .blocks = .{}, - .single_threaded = mod.comp.bin_file.options.single_threaded, + .sync_scope = if (mod.comp.bin_file.options.single_threaded) .singlethread else .system, .di_scope = di_scope, .di_file = di_file, .base_line = dg.decl.src_line, @@ -1523,11 +1488,11 @@ pub const Object = struct { const decl_name_slice = decl_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func: *llvm.DISubprogram = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.context, decl_name_slice.ptr, decl_name_slice.len); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); di_global.replaceLinkageName(linkage_name); } } @@ -1560,11 +1525,11 @@ pub const Object = struct { const exp_name_slice = exp_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func: *llvm.DISubprogram = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.context, exp_name_slice.ptr, exp_name_slice.len); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); di_global.replaceLinkageName(linkage_name); } } @@ -1598,7 +1563,11 @@ pub const Object = struct { }, } if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| { - global.ptr(&self.builder).section = try self.builder.string(section); + switch (global.ptrConst(&self.builder).kind) { + inline .variable, .function => |impl_index| impl_index.ptr(&self.builder).section = + try self.builder.string(section), + else => unreachable, + } llvm_global.setSection(section); } if (decl.val.getVariable(mod)) |decl_var| { @@ -1623,7 +1592,7 @@ pub const Object = struct { alias.setAliasee(llvm_global); } else { _ = self.llvm_module.addAlias( - llvm_global.globalGetValueType(), + global.ptrConst(&self.builder).type.toLlvm(&self.builder), 0, llvm_global, exp_name_z, @@ -2773,7 +2742,7 @@ pub const Object = struct { } if (fn_info.alignment.toByteUnitsOptional()) |a| { - global.alignment = Builder.Alignment.fromByteUnits(a); + function.alignment = Builder.Alignment.fromByteUnits(a); llvm_fn.setAlignment(@intCast(a)); } @@ -2944,7 +2913,7 @@ pub const Object = struct { const llvm_ty = ty.toLlvm(&o.builder); if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; - if (!llvm_ty.isSized().toBool()) break :check; + if (!try ty.isSized(&o.builder)) break :check; const zig_size = t.abiSize(mod); const llvm_size = o.target_data.abiSizeOfType(llvm_ty); @@ -3807,7 +3776,7 @@ pub const Object = struct { } assert(llvm_index == llvm_len); - return 
try o.builder.structConst(if (need_unnamed) + return o.builder.structConst(if (need_unnamed) try o.builder.structType(struct_ty.structKind(&o.builder), fields) else struct_ty, vals); @@ -3904,7 +3873,7 @@ pub const Object = struct { } assert(llvm_index == llvm_len); - return try o.builder.structConst(if (need_unnamed) + return o.builder.structConst(if (need_unnamed) try o.builder.structType(struct_ty.structKind(&o.builder), fields) else struct_ty, vals); @@ -3978,7 +3947,7 @@ pub const Object = struct { vals[2] = try o.builder.undefConst(fields[2]); len = 3; } - return try o.builder.structConst(if (need_unnamed) + return o.builder.structConst(if (need_unnamed) try o.builder.structType(union_ty.structKind(&o.builder), fields[0..len]) else union_ty, vals[0..len]); @@ -4012,7 +3981,7 @@ pub const Object = struct { const ParentPtr = struct { ty: Type, - llvm_ptr: *llvm.Value, + llvm_ptr: Builder.Value, }; fn lowerParentPtrDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant { @@ -4040,12 +4009,10 @@ pub const Object = struct { return parent_ptr; } - return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, &.{ - try o.builder.intConst(.i32, 0), - try o.builder.intConst(.i32, @as( - i32, - if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1, - )), + const index: u32 = + if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1; + return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index), }); }, .opt_payload => |opt_ptr| { @@ -4061,16 +4028,16 @@ pub const Object = struct { return parent_ptr; } - return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, &(.{ - try o.builder.intConst(.i32, 0), - } ** 2)); + return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, 0), + }); }, .comptime_field => unreachable, .elem => |elem_ptr| { const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, &.{ + return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{ try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index), }); }, @@ -4092,9 +4059,9 @@ pub const Object = struct { return parent_ptr; } - return o.builder.gepConst(.inbounds, try o.lowerType(parent_ty), parent_ptr, &.{ - try o.builder.intConst(.i32, 0), - try o.builder.intConst(.i32, @intFromBool( + const parent_llvm_ty = try o.lowerType(parent_ty); + return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, @intFromBool( layout.tag_size > 0 and layout.tag_align >= layout.payload_align, )), }); @@ -4109,7 +4076,8 @@ pub const Object = struct { const prev_bits = b: { var b: usize = 0; for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; b += @intCast(field.ty.bitSize(mod)); } break :b b; @@ -4123,6 +4091,7 @@ pub const Object = struct { .inbounds, try o.lowerType(parent_ty), parent_ptr, + null, if (llvmField(parent_ty, field_index, mod)) |llvm_field| &.{ try o.builder.intConst(.i32, 0), try 
o.builder.intConst(.i32, llvm_field.index), @@ -4135,9 +4104,9 @@ pub const Object = struct { }, .Pointer => { assert(parent_ty.isSlice(mod)); - return o.builder.gepConst(.inbounds, try o.lowerType(parent_ty), parent_ptr, &.{ - try o.builder.intConst(.i32, 0), - try o.builder.intConst(.i32, field_index), + const parent_llvm_ty = try o.lowerType(parent_ty); + return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, field_index), }); }, else => unreachable, @@ -4167,8 +4136,7 @@ pub const Object = struct { const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or - (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) - return o.lowerPtrToVoid(ty); + (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) return o.lowerPtrToVoid(ty); try mod.markDeclAlive(decl); @@ -4240,7 +4208,7 @@ pub const Object = struct { ) void { const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len); assert(kind_id != 0); - const llvm_attr = o.context.createEnumAttribute(kind_id, int); + const llvm_attr = o.builder.llvm.context.createEnumAttribute(kind_id, int); val.addAttributeAtIndex(index, llvm_attr); } @@ -4251,7 +4219,7 @@ pub const Object = struct { name: []const u8, value: []const u8, ) void { - const llvm_attr = o.context.createStringAttribute( + const llvm_attr = o.builder.llvm.context.createStringAttribute( name.ptr, @intCast(name.len), value.ptr, @@ -4346,51 +4314,6 @@ pub const Object = struct { llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder)); } } - - fn buildAllocaInner( - o: *Object, - wip: *Builder.WipFunction, - builder: *llvm.Builder, - llvm_func: *llvm.Value, - di_scope_non_null: bool, - llvm_ty: *llvm.Type, - maybe_alignment: ?c_uint, - target: std.Target, - ) Allocator.Error!*llvm.Value { - const address_space = llvmAllocaAddressSpace(target); - - const alloca = blk: { - const prev_cursor = wip.cursor; - const prev_block = builder.getInsertBlock(); - const prev_debug_location = builder.getCurrentDebugLocation2(); - defer { - wip.cursor = prev_cursor; - builder.positionBuilderAtEnd(prev_block); - if (di_scope_non_null) { - builder.setCurrentDebugLocation2(prev_debug_location); - } - } - - const entry_block = llvm_func.getFirstBasicBlock().?; - wip.cursor = .{ .block = .entry }; - builder.positionBuilder(entry_block, entry_block.getFirstInstruction()); - builder.clearCurrentDebugLocation(); - - break :blk builder.buildAllocaInAddressSpace(llvm_ty, @intFromEnum(address_space), ""); - }; - - if (maybe_alignment) |alignment| { - alloca.setAlignment(alignment); - } - - // The pointer returned from this function should have the generic address space, - // if this isn't the case then cast it to the generic address space. 
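// Illustrative sketch (not from the patch itself): the recurring constant-GEP shape that the
// lowerParentPtr hunks above migrate to. The extra `null` argument is new in this change; its
// meaning (presumably an optional "inrange" index) is an assumption inferred from usage, as is
// the surrounding context (`o: *Object`, a lowered `parent_ptr`, and an `i32` field index).
//     const field_ptr = try o.builder.gepConst(.inbounds, try o.lowerType(parent_ty), parent_ptr, null, &.{
//         try o.builder.intConst(.i32, 0), // step through the base pointer itself
//         try o.builder.intConst(.i32, field_index), // then select the struct field
//     });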
- if (address_space != .default) { - return builder.buildAddrSpaceCast(alloca, Builder.Type.ptr.toLlvm(&o.builder), ""); - } - - return alloca; - } }; pub const DeclGen = struct { @@ -4424,10 +4347,10 @@ pub const DeclGen = struct { const variable = try o.resolveGlobalDecl(decl_index); const global = variable.ptrConst(&o.builder).global; var llvm_global = global.toLlvm(&o.builder); - global.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); + variable.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); llvm_global.setAlignment(decl.getAlignment(mod)); if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| { - global.ptr(&o.builder).section = try o.builder.string(section); + variable.ptr(&o.builder).section = try o.builder.string(section); llvm_global.setSection(section); } assert(decl.has_tv); @@ -4439,10 +4362,7 @@ pub const DeclGen = struct { if (init_val != .none) { const llvm_init = try o.lowerValue(init_val); const llvm_init_ty = llvm_init.typeOf(&o.builder); - global.ptr(&o.builder).type = llvm_init_ty; - variable.ptr(&o.builder).mutability = .global; - variable.ptr(&o.builder).init = llvm_init; - if (llvm_global.globalGetValueType() == llvm_init.typeOf(&o.builder).toLlvm(&o.builder)) { + if (global.ptrConst(&o.builder).type == llvm_init_ty) { llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); } else { // LLVM does not allow us to change the type of globals. So we must @@ -4477,7 +4397,10 @@ pub const DeclGen = struct { new_global; llvm_global.deleteGlobal(); llvm_global = new_global; + variable.ptr(&o.builder).mutability = .global; + global.ptr(&o.builder).type = llvm_init_ty; } + variable.ptr(&o.builder).init = llvm_init; } if (o.di_builder) |dib| { @@ -4508,7 +4431,6 @@ pub const FuncGen = struct { air: Air, liveness: Liveness, wip: Builder.WipFunction, - context: *llvm.Context, builder: *llvm.Builder, di_scope: ?*llvm.DIScope, di_file: ?*llvm.DIFile, @@ -4525,26 +4447,24 @@ pub const FuncGen = struct { /// This stores the LLVM values used in a function, such that they can be referred to /// in other instructions. This table is cleared before every function is generated. - func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *llvm.Value), + func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, Builder.Value), /// If the return type is sret, this is the result pointer. Otherwise null. /// Note that this can disagree with isByRef for the return type in the case /// of C ABI functions. - ret_ptr: ?*llvm.Value, + ret_ptr: Builder.Value, /// Any function that needs to perform Valgrind client requests needs an array alloca /// instruction, however a maximum of one per function is needed. - valgrind_client_request_array: ?*llvm.Value = null, + valgrind_client_request_array: Builder.Value = .none, /// These fields are used to refer to the LLVM value of the function parameters /// in an Arg instruction. /// This list may be shorter than the list according to the zig type system; /// it omits 0-bit types. If the function uses sret as the first parameter, /// this slice does not include it. - args: []const *llvm.Value, - arg_index: c_uint, + args: []const Builder.Value, + arg_index: usize, - llvm_func: *llvm.Value, - - err_ret_trace: ?*llvm.Value = null, + err_ret_trace: Builder.Value = .none, /// This data structure is used to implement breaking to blocks. 
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { @@ -4552,13 +4472,16 @@ pub const FuncGen = struct { breaks: *BreakList, }), - single_threaded: bool, + sync_scope: Builder.SyncScope, const DbgState = struct { loc: *llvm.DILocation, scope: *llvm.DIScope, base_line: u32 }; - const BreakList = std.MultiArrayList(struct { - bb: *llvm.BasicBlock, - val: *llvm.Value, - }); + const BreakList = union { + list: std.MultiArrayList(struct { + bb: Builder.Function.Block.Index, + val: Builder.Value, + }), + len: usize, + }; fn deinit(self: *FuncGen) void { self.wip.deinit(); @@ -4573,7 +4496,7 @@ pub const FuncGen = struct { return self.dg.todo(format, args); } - fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*llvm.Value { + fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value { const gpa = self.gpa; const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; @@ -4584,8 +4507,8 @@ pub const FuncGen = struct { .ty = self.typeOf(inst), .val = (try self.air.value(inst, mod)).?, }); - gop.value_ptr.* = llvm_val.toLlvm(&o.builder); - return gop.value_ptr.*; + gop.value_ptr.* = llvm_val.toValue(); + return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, tv: TypedValue) Error!Builder.Constant { @@ -4613,19 +4536,19 @@ pub const FuncGen = struct { .unnamed_addr = .unnamed_addr, .addr_space = llvm_actual_addrspace, .type = llvm_ty, - .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; var variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = .constant, .init = llvm_val, + .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), }; try o.builder.llvm.globals.append(o.gpa, llvm_global); const global_index = try o.builder.addGlobal(.empty, global); try o.builder.variables.append(o.gpa, variable); - return try o.builder.convConst( + return o.builder.convConst( .unneeded, global_index.toConst(), try o.builder.ptrType(llvm_wanted_addrspace), @@ -4651,10 +4574,9 @@ pub const FuncGen = struct { const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) 
|inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) - continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - const opt_value: ?*llvm.Value = switch (air_tags[inst]) { + const val: Builder.Value = switch (air_tags[inst]) { // zig fmt: off .add => try self.airAdd(inst, false), .add_optimized => try self.airAdd(inst, true), @@ -4745,15 +4667,15 @@ pub const FuncGen = struct { .cmp_vector_optimized => try self.airCmpVector(inst, true), .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), - .is_non_null => try self.airIsNonNull(inst, false, .NE), - .is_non_null_ptr => try self.airIsNonNull(inst, true , .NE), - .is_null => try self.airIsNonNull(inst, false, .EQ), - .is_null_ptr => try self.airIsNonNull(inst, true , .EQ), + .is_non_null => try self.airIsNonNull(inst, false, .ne), + .is_non_null_ptr => try self.airIsNonNull(inst, true , .ne), + .is_null => try self.airIsNonNull(inst, false, .eq), + .is_null_ptr => try self.airIsNonNull(inst, true , .eq), - .is_non_err => try self.airIsErr(inst, .EQ, false), - .is_non_err_ptr => try self.airIsErr(inst, .EQ, true), - .is_err => try self.airIsErr(inst, .NE, false), - .is_err_ptr => try self.airIsErr(inst, .NE, true), + .is_non_err => try self.airIsErr(inst, .eq, false), + .is_non_err_ptr => try self.airIsErr(inst, .eq, true), + .is_err => try self.airIsErr(inst, .ne, false), + .is_err_ptr => try self.airIsErr(inst, .ne, true), .alloc => try self.airAlloc(inst), .ret_ptr => try self.airRetPtr(inst), @@ -4830,10 +4752,10 @@ pub const FuncGen = struct { .reduce => try self.airReduce(inst, false), .reduce_optimized => try self.airReduce(inst, true), - .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), - .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), - .atomic_store_release => try self.airAtomicStore(inst, .Release), - .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent), + .atomic_store_unordered => try self.airAtomicStore(inst, .unordered), + .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic), + .atomic_store_release => try self.airAtomicStore(inst, .release), + .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst), .struct_field_ptr => try self.airStructFieldPtr(inst), .struct_field_val => try self.airStructFieldVal(body[i..]), @@ -4875,8 +4797,8 @@ pub const FuncGen = struct { .inferred_alloc, .inferred_alloc_comptime => unreachable, - .unreach => self.airUnreach(inst), - .dbg_stmt => self.airDbgStmt(inst), + .unreach => try self.airUnreach(inst), + .dbg_stmt => try self.airDbgStmt(inst), .dbg_inline_begin => try self.airDbgInlineBegin(inst), .dbg_inline_end => try self.airDbgInlineEnd(inst), .dbg_block_begin => try self.airDbgBlockBegin(), @@ -4894,14 +4816,11 @@ pub const FuncGen = struct { .work_group_id => try self.airWorkGroupId(inst), // zig fmt: on }; - if (opt_value) |val| { - const ref = Air.indexToRef(inst); - try self.func_inst_table.putNoClobber(self.gpa, ref, val); - } + if (val != .none) try self.func_inst_table.putNoClobber(self.gpa, Air.indexToRef(inst), val); } } - fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value { + fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); @@ -4924,16 
+4843,18 @@ pub const FuncGen = struct { defer llvm_args.deinit(); const ret_ptr = if (!sret) null else blk: { - const llvm_ret_ty = (try o.lowerType(return_type)).toLlvm(&o.builder); - const ret_ptr = try self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); - try llvm_args.append(ret_ptr); + const llvm_ret_ty = try o.lowerType(return_type); + const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment); + try llvm_args.append(ret_ptr.toLlvm(&self.wip)); break :blk ret_ptr; }; const err_return_tracing = return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { - try llvm_args.append(self.err_ret_trace.?); + assert(self.err_ret_trace != .none); + try llvm_args.append(self.err_ret_trace.toLlvm(&self.wip)); } var it = iterateParamTypes(o, fn_info); @@ -4943,14 +4864,13 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const llvm_param_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); + const llvm_param_ty = try o.lowerType(param_ty); if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } else { - try llvm_args.append(llvm_arg); + try llvm_args.append(llvm_arg.toLlvm(&self.wip)); } }, .byref => { @@ -4958,14 +4878,13 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); if (isByRef(param_ty, mod)) { - try llvm_args.append(llvm_arg); + try llvm_args.append(llvm_arg.toLlvm(&self.wip)); } else { - const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = llvm_arg.typeOf(); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const param_llvm_ty = llvm_arg.typeOfWip(&self.wip); const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); - const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); - store_inst.setAlignment(alignment); - try llvm_args.append(arg_ptr); + _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); + try llvm_args.append(arg_ptr.toLlvm(&self.wip)); } }, .byref_mut => { @@ -4973,56 +4892,46 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = (try o.lowerType(param_ty)).toLlvm(&o.builder); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const param_llvm_ty = try o.lowerType(param_ty); const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); if (isByRef(param_ty, mod)) { - const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - - const store_inst = self.builder.buildStore(load_inst, arg_ptr); - store_inst.setAlignment(alignment); - try llvm_args.append(arg_ptr); + const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, ""); + _ = try self.wip.store(.normal, loaded, arg_ptr, alignment); } else { - const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); - store_inst.setAlignment(alignment); - try 
llvm_args.append(arg_ptr); + _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); } + try llvm_args.append(arg_ptr.toLlvm(&self.wip)); }, .abi_sized_int => { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const int_llvm_ty = (try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8))).toLlvm(&o.builder); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. - const alignment = @max( + const alignment = Builder.Alignment.fromByteUnits(@max( param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty), - ); + o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), + )); const int_ptr = try self.buildAlloca(int_llvm_ty, alignment); - const store_inst = self.builder.buildStore(llvm_arg, int_ptr); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); + const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } }, .slice => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const ptr = self.builder.buildExtractValue(llvm_arg, 0, ""); - const len = self.builder.buildExtractValue(llvm_arg, 1, ""); - try llvm_args.ensureUnusedCapacity(2); - llvm_args.appendAssumeCapacity(ptr); - llvm_args.appendAssumeCapacity(len); + const ptr = try self.wip.extractValue(llvm_arg, &.{0}, ""); + const len = try self.wip.extractValue(llvm_arg, &.{1}, ""); + try llvm_args.appendSlice(&.{ ptr.toLlvm(&self.wip), len.toLlvm(&self.wip) }); }, .multiple_llvm_types => { const arg = args[it.zig_index - 1]; @@ -5030,75 +4939,77 @@ pub const FuncGen = struct { const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); - const arg_ptr = if (is_by_ref) llvm_arg else p: { - const p = try self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(param_ty.abiAlignment(mod)); - break :p p; + const arg_ptr = if (is_by_ref) llvm_arg else ptr: { + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + break :ptr ptr; }; - const llvm_ty = (try o.builder.structType(.normal, llvm_types)).toLlvm(&o.builder); + const llvm_ty = try o.builder.structType(.normal, llvm_types); try llvm_args.ensureUnusedCapacity(it.types_len); for (llvm_types, 0..) 
|field_ty, i| { - const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, @intCast(i), ""); - const load_inst = self.builder.buildLoad(field_ty.toLlvm(&o.builder), field_ptr, ""); - load_inst.setAlignment(target.ptrBitWidth() / 8); - llvm_args.appendAssumeCapacity(load_inst); + const alignment = + Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const field_ptr = try self.wip.gepStruct(llvm_ty, arg_ptr, i, ""); + const loaded = try self.wip.load(.normal, field_ty, field_ptr, alignment, ""); + llvm_args.appendAssumeCapacity(loaded.toLlvm(&self.wip)); } }, .as_u16 => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const casted = self.builder.buildBitCast(llvm_arg, Builder.Type.i16.toLlvm(&o.builder), ""); - try llvm_args.append(casted); + const casted = try self.wip.cast(.bitcast, llvm_arg, .i16, ""); + try llvm_args.append(casted.toLlvm(&self.wip)); }, .float_array => |count| { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); if (!isByRef(arg_ty, mod)) { - const p = try self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(mod)); - llvm_arg = store_inst; + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + llvm_arg = ptr; } const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); const array_ty = try o.builder.arrayType(count, float_ty); - const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_ty.toLlvm(&o.builder), llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); }, .i32_array, .i64_array => |arr_len| { const elem_size: u8 = if (lowering == .i32_array) 32 else 64; const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); if (!isByRef(arg_ty, mod)) { - const p = try self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(mod)); - llvm_arg = store_inst; + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + llvm_arg = ptr; } - const array_ty = try o.builder.arrayType(arr_len, try o.builder.intType(@intCast(elem_size))); - const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_ty.toLlvm(&o.builder), llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const array_ty = + try o.builder.arrayType(arr_len, try o.builder.intType(@intCast(elem_size))); + const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); }, }; - const call = self.builder.buildCall( - (try o.lowerType(zig_fn_ty)).toLlvm(&o.builder), - llvm_fn, - llvm_args.items.ptr, - @intCast(llvm_args.items.len), - toLlvmCallConv(fn_info.cc, target), - attr, - "", + const llvm_fn_ty = try o.lowerType(zig_fn_ty); + const call = (try 
self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&self.wip), + llvm_args.items.ptr, + @intCast(llvm_args.items.len), + toLlvmCallConv(fn_info.cc, target), + attr, + "", + ), + &self.wip, ); if (callee_ty.zigTypeTag(mod) == .Pointer) { @@ -5111,7 +5022,7 @@ pub const FuncGen = struct { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); if (!isByRef(param_ty, mod)) { - o.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); + o.addByValParamAttrs(call.toLlvm(&self.wip), param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { @@ -5119,10 +5030,10 @@ pub const FuncGen = struct { const param_ty = fn_info.param_types.get(ip)[param_index].toType(); const param_llvm_ty = try o.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); - o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); + o.addByRefParamAttrs(call.toLlvm(&self.wip), it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { - o.addArgAttr(call, it.llvm_index - 1, "noundef"); + o.addArgAttr(call.toLlvm(&self.wip), it.llvm_index - 1, "noundef"); }, // No attributes needed for these. .no_bits, @@ -5142,70 +5053,63 @@ pub const FuncGen = struct { if (math.cast(u5, it.zig_index - 1)) |i| { if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { - o.addArgAttr(call, llvm_arg_i, "noalias"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "noalias"); } } if (param_ty.zigTypeTag(mod) != .Optional) { - o.addArgAttr(call, llvm_arg_i, "nonnull"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "nonnull"); } if (ptr_info.flags.is_const) { - o.addArgAttr(call, llvm_arg_i, "readonly"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "readonly"); } const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse @max(ptr_info.child.toType().abiAlignment(mod), 1); - o.addArgAttrInt(call, llvm_arg_i, "align", elem_align); + o.addArgAttrInt(call.toLlvm(&self.wip), llvm_arg_i, "align", elem_align); }, }; } if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) { - return null; + return .none; } if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { - return null; + return .none; } - const llvm_ret_ty = (try o.lowerType(return_type)).toLlvm(&o.builder); + const llvm_ret_ty = try o.lowerType(return_type); if (ret_ptr) |rp| { - call.setCallSret(llvm_ret_ty); + call.toLlvm(&self.wip).setCallSret(llvm_ret_ty.toLlvm(&o.builder)); if (isByRef(return_type, mod)) { return rp; } else { // our by-ref status disagrees with sret so we must load. - const loaded = self.builder.buildLoad(llvm_ret_ty, rp, ""); - loaded.setAlignment(return_type.abiAlignment(mod)); - return loaded; + const return_alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, ""); } } - const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); + const abi_ret_ty = try lowerFnRetTy(o, fn_info); if (abi_ret_ty != llvm_ret_ty) { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. 
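// Illustrative sketch (not from the patch itself): the transitional pattern used for call
// instructions above. Calls are still emitted through the LLVM C API, but the result is bound
// into the self-hosted builder so later code can treat it as a Builder.Value. The exact
// semantics of `unimplemented`/`finish` are an assumption inferred from how they are used here.
//     const llvm_fn_ty = try o.lowerType(zig_fn_ty);
//     const call = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish(
//         self.builder.buildCall(llvm_fn_ty.toLlvm(&o.builder), llvm_fn.toLlvm(&self.wip),
//             llvm_args.items.ptr, @intCast(llvm_args.items.len),
//             toLlvmCallConv(fn_info.cc, target), attr, ""),
//         &self.wip,
//     );
// From here on `call` participates in the new API, e.g. `try self.wip.store(.normal, call, rp, .default)`.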
- const alignment = o.target_data.abiAlignmentOfType(abi_ret_ty); - const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(call, rp); - store_inst.setAlignment(alignment); - if (isByRef(return_type, mod)) { - return rp; - } else { - const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, ""); - load_inst.setAlignment(alignment); - return load_inst; - } + const rp = try self.buildAlloca(llvm_ret_ty, .default); + _ = try self.wip.store(.normal, call, rp, .default); + return if (isByRef(return_type, mod)) + rp + else + try self.wip.load(.normal, llvm_ret_ty, rp, .default, ""); } if (isByRef(return_type, mod)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. - const alignment = return_type.abiAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(call, rp); - store_inst.setAlignment(alignment); + _ = try self.wip.store(.normal, call, rp, alignment); return rp; } else { return call; @@ -5239,7 +5143,7 @@ pub const FuncGen = struct { const panic_decl = mod.declPtr(panic_func.owner_decl); const fn_info = mod.typeToFunc(panic_decl.ty).?; const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); - _ = fg.builder.buildCall( + _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildCall( (try o.lowerType(panic_decl.ty)).toLlvm(&o.builder), panic_global.toLlvm(&o.builder), &args, @@ -5247,21 +5151,21 @@ pub const FuncGen = struct { toLlvmCallConv(fn_info.cc, target), .Auto, "", - ); - _ = fg.builder.buildUnreachable(); + ), &fg.wip); + _ = try fg.wip.@"unreachable"(); } - fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ret_ty = self.typeOf(un_op); - if (self.ret_ptr) |ret_ptr| { + if (self.ret_ptr != .none) { const operand = try self.resolveInst(un_op); const ptr_ty = try mod.singleMutPtrType(ret_ty); - try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); - try self.wip.retVoid(); - return null; + try self.store(self.ret_ptr, ptr_ty, operand, .none); + _ = try self.wip.retVoid(); + return .none; } const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5269,43 +5173,37 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const int = try o.builder.intConst(Builder.Type.err_int, 0); - _ = self.builder.buildRet(int.toLlvm(&o.builder)); + _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0)); } else { - try self.wip.retVoid(); + _ = try self.wip.retVoid(); } - return null; + return .none; } - const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); + const abi_ret_ty = try lowerFnRetTy(o, fn_info); const operand = try self.resolveInst(un_op); - const alignment = ret_ty.abiAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod)); if (isByRef(ret_ty, mod)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. 
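// Illustrative sketch (not from the patch itself): the mechanical load/store migration repeated
// throughout this function. The C API set alignment on the instruction after the fact; the
// self-hosted WipFunction takes a Builder.Alignment directly. Names are taken from the hunk below.
//   before:  const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
//            load_inst.setAlignment(alignment);
//   after:   const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
//            const loaded = try self.wip.load(.normal, abi_ret_ty, operand, alignment, "");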
- const load_inst = self.builder.buildLoad(abi_ret_ty, operand, ""); - load_inst.setAlignment(alignment); - _ = self.builder.buildRet(load_inst); - return null; + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); + return .none; } - const llvm_ret_ty = operand.typeOf(); + const llvm_ret_ty = operand.typeOfWip(&self.wip); if (abi_ret_ty == llvm_ret_ty) { - _ = self.builder.buildRet(operand); - return null; + _ = try self.wip.ret(operand); + return .none; } const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(operand, rp); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(abi_ret_ty, rp, ""); - load_inst.setAlignment(alignment); - _ = self.builder.buildRet(load_inst); - return null; + _ = try self.wip.store(.normal, operand, rp, alignment); + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, rp, alignment, "")); + return .none; } - fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -5317,106 +5215,121 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const int = try o.builder.intConst(Builder.Type.err_int, 0); - _ = self.builder.buildRet(int.toLlvm(&o.builder)); + _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0)); } else { - try self.wip.retVoid(); + _ = try self.wip.retVoid(); } - return null; + return .none; } - if (self.ret_ptr != null) { - try self.wip.retVoid(); - return null; + if (self.ret_ptr != .none) { + _ = try self.wip.retVoid(); + return .none; } const ptr = try self.resolveInst(un_op); - const abi_ret_ty = (try lowerFnRetTy(o, fn_info)).toLlvm(&o.builder); - const loaded = self.builder.buildLoad(abi_ret_ty, ptr, ""); - loaded.setAlignment(ret_ty.abiAlignment(mod)); - _ = self.builder.buildRet(loaded); - return null; + const abi_ret_ty = try lowerFnRetTy(o, fn_info); + const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod)); + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); + return .none; } - fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = self.air.getRefType(ty_op.ty); - const llvm_arg_ty = (try o.lowerType(arg_ty)).toLlvm(&o.builder); + const llvm_arg_ty = try o.lowerType(arg_ty); - return self.builder.buildVAArg(list, llvm_arg_ty, ""); + return self.wip.vaArg(list, llvm_arg_ty, ""); } - fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = self.air.getRefType(ty_op.ty); - const llvm_va_list_ty = (try o.lowerType(va_list_ty)).toLlvm(&o.builder); + const llvm_va_list_ty = try o.lowerType(va_list_ty); const mod = o.module; - const result_alignment = va_list_ty.abiAlignment(mod); + const result_alignment = 
Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_copy"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const fn_type = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); - }; + const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - const args: [2]*llvm.Value = .{ dest_list, src_list }; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const args: [2]*llvm.Value = .{ dest_list.toLlvm(&self.wip), src_list.toLlvm(&self.wip) }; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); - if (isByRef(va_list_ty, mod)) { - return dest_list; - } else { - const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, ""); - loaded.setAlignment(result_alignment); - return loaded; - } + return if (isByRef(va_list_ty, mod)) + dest_list + else + try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); } - fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const list = try self.resolveInst(un_op); const llvm_fn_name = "llvm.va_end"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const fn_type = try o.builder.fnType(.void, &.{.ptr}, .normal); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); - }; - const args: [1]*llvm.Value = .{list}; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - return null; + const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); + + const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + return .none; } - fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const va_list_ty = self.typeOfIndex(inst); - const llvm_va_list_ty = (try o.lowerType(va_list_ty)).toLlvm(&o.builder); + const llvm_va_list_ty = try o.lowerType(va_list_ty); - const result_alignment = va_list_ty.abiAlignment(mod); + const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_start"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const fn_type = try o.builder.fnType(.void, &.{.ptr}, .normal); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); - }; - const args: [1]*llvm.Value = .{list}; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, 
.Fast, .Auto, ""); + const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - if (isByRef(va_list_ty, mod)) { - return list; - } else { - const loaded = self.builder.buildLoad(llvm_va_list_ty, list, ""); - loaded.setAlignment(result_alignment); - return loaded; - } + const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + + return if (isByRef(va_list_ty, mod)) + list + else + try self.wip.load(.normal, llvm_va_list_ty, list, result_alignment, ""); } - fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value { + fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -5427,7 +5340,7 @@ pub const FuncGen = struct { return self.cmp(lhs, rhs, operand_ty, op); } - fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5441,21 +5354,30 @@ pub const FuncGen = struct { return self.cmp(lhs, rhs, vec_ty, cmp_op); } - fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const llvm_fn = try self.getCmpLtErrorsLenFunction(); - const args: [1]*llvm.Value = .{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const args: [1]*llvm.Value = .{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } fn cmp( self: *FuncGen, - lhs: *llvm.Value, - rhs: *llvm.Value, + lhs: Builder.Value, + rhs: Builder.Value, operand_ty: Type, op: math.CompareOperator, - ) Allocator.Error!*llvm.Value { + ) Allocator.Error!Builder.Value { const o = self.dg.object; const mod = o.module; const scalar_ty = operand_ty.scalarType(mod); @@ -5472,50 +5394,48 @@ pub const FuncGen = struct { // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. 
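// Illustrative summary (not from the patch itself) of the optional-comparison lowering below:
// each operand is reduced to an i1 "is non-null" bit, the two bits are packed into an i2 with
// lhs in bit 1 and rhs in bit 0, and a switch dispatches on the packed value. 0b00 means both
// are null, 0b11 means both are non-null and the payloads must be compared, and anything else
// means exactly one is null; a phi over i1 then merges the three answers, e.g. for `.eq`:
//   0b00 -> true,  0b01 / 0b10 -> false,  0b11 -> payload_cmp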
 const is_by_ref = isByRef(scalar_ty, mod);
- const opt_llvm_ty = (try o.lowerType(scalar_ty)).toLlvm(&o.builder);
- const lhs_non_null = try self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
- const rhs_non_null = try self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
+ const opt_llvm_ty = try o.lowerType(scalar_ty);
+ const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref);
+ const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref);
 const llvm_i2 = try o.builder.intType(2);
- const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2.toLlvm(&o.builder), "");
- const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2.toLlvm(&o.builder), "");
- const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, (try o.builder.intConst(llvm_i2, 1)).toLlvm(&o.builder), "");
- const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, "");
- const both_null_block = try self.wip.block("BothNull");
- const mixed_block = try self.wip.block("Mixed");
- const both_pl_block = try self.wip.block("BothNonNull");
- const end_block = try self.wip.block("End");
- const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block.toLlvm(&self.wip), 2);
- const llvm_i2_00 = try o.builder.intConst(llvm_i2, 0b00);
- const llvm_i2_11 = try o.builder.intConst(llvm_i2, 0b11);
- llvm_switch.addCase(llvm_i2_00.toLlvm(&o.builder), both_null_block.toLlvm(&self.wip));
- llvm_switch.addCase(llvm_i2_11.toLlvm(&o.builder), both_pl_block.toLlvm(&self.wip));
+ const lhs_non_null_i2 = try self.wip.cast(.zext, lhs_non_null, llvm_i2, "");
+ const rhs_non_null_i2 = try self.wip.cast(.zext, rhs_non_null, llvm_i2, "");
+ const lhs_shifted = try self.wip.bin(.shl, lhs_non_null_i2, try o.builder.intValue(llvm_i2, 1), "");
+ const lhs_rhs_ored = try self.wip.bin(.@"or", lhs_shifted, rhs_non_null_i2, "");
+ const both_null_block = try self.wip.block(1, "BothNull");
+ const mixed_block = try self.wip.block(1, "Mixed");
+ const both_pl_block = try self.wip.block(1, "BothNonNull");
+ const end_block = try self.wip.block(3, "End");
+ var wip_switch = try self.wip.@"switch"(lhs_rhs_ored, mixed_block, 2);
+ defer wip_switch.finish(&self.wip);
+ try wip_switch.addCase(
+ try o.builder.intConst(llvm_i2, 0b00),
+ both_null_block,
+ &self.wip,
+ );
+ try wip_switch.addCase(
+ try o.builder.intConst(llvm_i2, 0b11),
+ both_pl_block,
+ &self.wip,
+ );
 self.wip.cursor = .{ .block = both_null_block };
- self.builder.positionBuilderAtEnd(both_null_block.toLlvm(&self.wip));
- _ = self.builder.buildBr(end_block.toLlvm(&self.wip));
+ _ = try self.wip.br(end_block);
 self.wip.cursor = .{ .block = mixed_block };
- self.builder.positionBuilderAtEnd(mixed_block.toLlvm(&self.wip));
- _ = self.builder.buildBr(end_block.toLlvm(&self.wip));
+ _ = try self.wip.br(end_block);
 self.wip.cursor = .{ .block = both_pl_block };
- self.builder.positionBuilderAtEnd(both_pl_block.toLlvm(&self.wip));
 const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true);
 const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true);
 const payload_cmp = try self.cmp(lhs_payload, rhs_payload, payload_ty, op);
- _ = self.builder.buildBr(end_block.toLlvm(&self.wip));
- const both_pl_block_end = self.builder.getInsertBlock();
+ _ = try self.wip.br(end_block);
+ const both_pl_block_end = self.wip.cursor.block;
 self.wip.cursor = .{ .block = end_block };
- self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip));
- const incoming_blocks: [3]*llvm.BasicBlock = .{
-
both_null_block.toLlvm(&self.wip), - mixed_block.toLlvm(&self.wip), - both_pl_block_end, - }; - const llvm_i1_0 = Builder.Constant.false.toLlvm(&o.builder); - const llvm_i1_1 = Builder.Constant.true.toLlvm(&o.builder); - const incoming_values: [3]*llvm.Value = .{ + const llvm_i1_0 = try o.builder.intValue(.i1, 0); + const llvm_i1_1 = try o.builder.intValue(.i1, 1); + const incoming_values: [3]Builder.Value = .{ switch (op) { .eq => llvm_i1_1, .neq => llvm_i1_0, @@ -5529,31 +5449,30 @@ pub const FuncGen = struct { payload_cmp, }; - const phi_node = self.builder.buildPhi(Builder.Type.i1.toLlvm(&o.builder), ""); - comptime assert(incoming_values.len == incoming_blocks.len); - phi_node.addIncoming( + const phi = try self.wip.phi(.i1, ""); + try phi.finish( &incoming_values, - &incoming_blocks, - incoming_values.len, + &.{ both_null_block, mixed_block, both_pl_block_end }, + &self.wip, ); - return phi_node; + return phi.toValue(); }, .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }), else => unreachable, }; const is_signed = int_ty.isSignedInt(mod); - const operation: llvm.IntPredicate = switch (op) { - .eq => .EQ, - .neq => .NE, - .lt => if (is_signed) llvm.IntPredicate.SLT else .ULT, - .lte => if (is_signed) llvm.IntPredicate.SLE else .ULE, - .gt => if (is_signed) llvm.IntPredicate.SGT else .UGT, - .gte => if (is_signed) llvm.IntPredicate.SGE else .UGE, + const cond: Builder.IntegerCondition = switch (op) { + .eq => .eq, + .neq => .ne, + .lt => if (is_signed) .slt else .ult, + .lte => if (is_signed) .sle else .ule, + .gt => if (is_signed) .sgt else .ugt, + .gte => if (is_signed) .sge else .uge, }; - return self.builder.buildICmp(operation, lhs, rhs, ""); + return self.wip.icmp(cond, lhs, rhs, ""); } - fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5563,13 +5482,15 @@ pub const FuncGen = struct { if (inst_ty.isNoReturn(mod)) { try self.genBody(body); - return null; + return .none; } - var breaks: BreakList = .{}; - defer breaks.deinit(self.gpa); + const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); - const parent_bb = try self.wip.block("Block"); + var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; + defer if (have_block_result) breaks.list.deinit(self.gpa); + + const parent_bb = try self.wip.block(0, "Block"); try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .breaks = &breaks, @@ -5579,35 +5500,32 @@ pub const FuncGen = struct { try self.genBody(body); self.wip.cursor = .{ .block = parent_bb }; - self.builder.positionBuilderAtEnd(parent_bb.toLlvm(&self.wip)); // Create a phi node only if the block returns a value. - const is_body = inst_ty.zigTypeTag(mod) == .Fn; - if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - - const raw_llvm_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); - - const llvm_ty = ty: { - // If the zig tag type is a function, this represents an actual function body; not - // a pointer to it. LLVM IR allows the call instruction to use function bodies instead - // of function pointers, however the phi makes it a runtime value and therefore - // the LLVM type has to be wrapped in a pointer. 
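// Illustrative note (not from the patch itself) on the BreakList union declared earlier in
// FuncGen: a block whose result is actually used must record every break's (basic block, value)
// pair so the phi built below can be wired up, while a valueless block only needs a running
// count of incoming edges so the block's `incoming` total can still be set. Field names follow
// the diff:
//   has result:  try block.breaks.list.append(gpa, .{ .bb = self.wip.cursor.block, .val = val });
//   no result:   block.breaks.len += 1;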
- if (is_body or isByRef(inst_ty, mod)) { - break :ty Builder.Type.ptr.toLlvm(&o.builder); - } - break :ty raw_llvm_ty; - }; + if (have_block_result) { + const raw_llvm_ty = try o.lowerType(inst_ty); + const llvm_ty: Builder.Type = ty: { + // If the zig tag type is a function, this represents an actual function body; not + // a pointer to it. LLVM IR allows the call instruction to use function bodies instead + // of function pointers, however the phi makes it a runtime value and therefore + // the LLVM type has to be wrapped in a pointer. + if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) { + break :ty .ptr; + } + break :ty raw_llvm_ty; + }; - const phi_node = self.builder.buildPhi(llvm_ty, ""); - phi_node.addIncoming( - breaks.items(.val).ptr, - breaks.items(.bb).ptr, - @intCast(breaks.len), - ); - return phi_node; + parent_bb.ptr(&self.wip).incoming = @intCast(breaks.list.len); + const phi = try self.wip.phi(llvm_ty, ""); + try phi.finish(breaks.list.items(.val), breaks.list.items(.bb), &self.wip); + return phi.toValue(); + } else { + parent_bb.ptr(&self.wip).incoming = @intCast(breaks.len); + return .none; + } } - fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const branch = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(branch.block_inst).?; @@ -5615,44 +5533,39 @@ pub const FuncGen = struct { // Add the values to the lists only if the break provides a value. const operand_ty = self.typeOf(branch.operand); const mod = o.module; - if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the // break instructions. - try block.breaks.append(self.gpa, .{ - .bb = self.builder.getInsertBlock(), - .val = val, - }); - } - _ = self.builder.buildBr(block.parent_bb.toLlvm(&self.wip)); - return null; + try block.breaks.list.append(self.gpa, .{ .bb = self.wip.cursor.block, .val = val }); + } else block.breaks.len += 1; + _ = try self.wip.br(block.parent_bb); + return .none; } - fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const then_block = try self.wip.block("Then"); - const else_block = try self.wip.block("Else"); - _ = self.builder.buildCondBr(cond, then_block.toLlvm(&self.wip), else_block.toLlvm(&self.wip)); + const then_block = try self.wip.block(1, "Then"); + const else_block = try self.wip.block(1, "Else"); + _ = try self.wip.brCond(cond, then_block, else_block); self.wip.cursor = .{ .block = then_block }; - self.builder.positionBuilderAtEnd(then_block.toLlvm(&self.wip)); try self.genBody(then_body); self.wip.cursor = .{ .block = else_block }; - self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); try self.genBody(else_body); // No need to reset the insert cursor since this instruction is noreturn. 
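// Observation (not from the patch itself): the new `wip.block(n, name)` appears to take the
// expected number of incoming edges up front, an assumption inferred from usage in this diff:
// conditional-branch targets are created with 1, the loop header with 2 (entry plus back edge),
// the optional-comparison "End" block with 3, and switch cases with `items.len`. When the count
// is only known later it is patched, as in `parent_bb.ptr(&self.wip).incoming = @intCast(breaks.list.len);`.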
- return null; + return .none; } - fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5667,7 +5580,7 @@ pub const FuncGen = struct { return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } - fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5681,139 +5594,149 @@ pub const FuncGen = struct { fn lowerTry( fg: *FuncGen, - err_union: *llvm.Value, + err_union: Builder.Value, body: []const Air.Inst.Index, err_union_ty: Type, operand_is_ptr: bool, can_elide_load: bool, is_unused: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); + const err_union_llvm_ty = try o.lowerType(err_union_ty); if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const is_err = err: { - const err_set_ty = Builder.Type.err_int.toLlvm(&o.builder); - const zero = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); + const loaded = loaded: { if (!payload_has_bits) { // TODO add alignment to this load - const loaded = if (operand_is_ptr) - fg.builder.buildLoad(err_set_ty, err_union, "") + break :loaded if (operand_is_ptr) + try fg.wip.load(.normal, Builder.Type.err_int, err_union, .default, "") else err_union; - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); } const err_field_index = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, ""); + const err_field_ptr = + try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to this load - const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, ""); - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); + break :loaded try fg.wip.load( + .normal, + Builder.Type.err_int, + err_field_ptr, + .default, + "", + ); } - const loaded = fg.builder.buildExtractValue(err_union, err_field_index, ""); - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); + break :loaded try fg.wip.extractValue(err_union, &.{err_field_index}, ""); }; + const zero = try o.builder.intValue(Builder.Type.err_int, 0); + const is_err = try fg.wip.icmp(.ne, loaded, zero, ""); - const return_block = try fg.wip.block("TryRet"); - const continue_block = try fg.wip.block("TryCont"); - _ = fg.builder.buildCondBr(is_err, return_block.toLlvm(&fg.wip), continue_block.toLlvm(&fg.wip)); + const return_block = try fg.wip.block(1, "TryRet"); + const continue_block = try fg.wip.block(1, "TryCont"); + _ = try fg.wip.brCond(is_err, return_block, continue_block); fg.wip.cursor = .{ .block = return_block }; - fg.builder.positionBuilderAtEnd(return_block.toLlvm(&fg.wip)); try fg.genBody(body); fg.wip.cursor = .{ .block = continue_block }; - fg.builder.positionBuilderAtEnd(continue_block.toLlvm(&fg.wip)); - } - if (is_unused) { - return null; - } - if (!payload_has_bits) { - return if (operand_is_ptr) err_union else null; } + if (is_unused) return .none; + if 
(!payload_has_bits) return if (operand_is_ptr) err_union else .none; const offset = errUnionPayloadOffset(payload_ty, mod); if (operand_is_ptr) { - return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); + return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); } else if (isByRef(err_union_ty, mod)) { - const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); + const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; - return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); + return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(mod)); - return load_inst; + const load_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; + return fg.wip.load(.normal, load_ty, payload_ptr, payload_alignment, ""); } - return fg.builder.buildExtractValue(err_union, offset, ""); + return fg.wip.extractValue(err_union, &.{offset}, ""); } - fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const else_block = try self.wip.block("Else"); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) - self.builder.buildPtrToInt(cond, llvm_usize, "") + const else_block = try self.wip.block(1, "Default"); + const llvm_usize = try o.lowerType(Type.usize); + const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder)) + try self.wip.cast(.ptrtoint, cond, llvm_usize, "") else cond; - const llvm_switch = self.builder.buildSwitch(cond_int, else_block.toLlvm(&self.wip), switch_br.data.cases_len); var extra_index: usize = switch_br.end; var case_i: u32 = 0; + var llvm_cases_len: u32 = 0; + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = self.air.extraData(Air.SwitchBr.Case, extra_index); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); + const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + case.data.items_len + case_body.len; + llvm_cases_len += @intCast(items.len); + } + + var wip_switch = try self.wip.@"switch"(cond_int, else_block, llvm_cases_len); + defer wip_switch.finish(&self.wip); + + extra_index = switch_br.end; + case_i = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items: []const Air.Inst.Ref = @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; - const case_block = try self.wip.block("Case"); + const case_block = try self.wip.block(@intCast(items.len), "Case"); for (items) |item| { - const llvm_item = try 
self.resolveInst(item); - const llvm_int_item = if (llvm_item.typeOf().getTypeKind() == .Pointer) - llvm_item.constPtrToInt(llvm_usize) + const llvm_item = (try self.resolveInst(item)).toConst().?; + const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder)) + try o.builder.castConst(.ptrtoint, llvm_item, llvm_usize) else llvm_item; - llvm_switch.addCase(llvm_int_item, case_block.toLlvm(&self.wip)); + try wip_switch.addCase(llvm_int_item, case_block, &self.wip); } self.wip.cursor = .{ .block = case_block }; - self.builder.positionBuilderAtEnd(case_block.toLlvm(&self.wip)); try self.genBody(case_body); } self.wip.cursor = .{ .block = else_block }; - self.builder.positionBuilderAtEnd(else_block.toLlvm(&self.wip)); const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; if (else_body.len != 0) { try self.genBody(else_body); } else { - _ = self.builder.buildUnreachable(); + _ = try self.wip.@"unreachable"(); } // No need to reset the insert cursor since this instruction is noreturn. - return null; + return .none; } - fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const loop_block = try self.wip.block("Loop"); - _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + const loop_block = try self.wip.block(2, "Loop"); + _ = try self.wip.br(loop_block); self.wip.cursor = .{ .block = loop_block }; - self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); try self.genBody(body); // TODO instead of this logic, change AIR to have the property that @@ -5823,35 +5746,30 @@ pub const FuncGen = struct { // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. 
if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { - _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + _ = try self.wip.br(loop_block); } - return null; + return .none; } - fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); const llvm_usize = try o.lowerType(Type.usize); - const len = (try o.builder.intConst(llvm_usize, array_ty.arrayLen(mod))).toLlvm(&o.builder); - const slice_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); + const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod)); + const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); - } - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder), - } ** 2; - const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); - const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, ""); - const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); + if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, ""); + const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{ + try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0), + }, ""); + return self.wip.buildAggregate(slice_llvm_ty, &.{ ptr, len }, ""); } - fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -5865,23 +5783,21 @@ pub const FuncGen = struct { const dest_llvm_ty = try o.lowerType(dest_ty); const target = mod.getTarget(); - if (intrinsicsAllowed(dest_scalar_ty, target)) { - if (operand_scalar_ty.isSignedInt(mod)) { - return self.builder.buildSIToFP(operand, dest_llvm_ty.toLlvm(&o.builder), ""); - } else { - return self.builder.buildUIToFP(operand, dest_llvm_ty.toLlvm(&o.builder), ""); - } - } + if (intrinsicsAllowed(dest_scalar_ty, target)) return self.wip.conv( + if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + dest_llvm_ty, + "", + ); const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); const rt_int_ty = try o.builder.intType(rt_int_bits); - var extended = e: { - if (operand_scalar_ty.isSignedInt(mod)) { - break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty.toLlvm(&o.builder), ""); - } else { - break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty.toLlvm(&o.builder), ""); - } - }; + var extended = try self.wip.conv( + if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + rt_int_ty, + "", + ); const dest_bits = dest_scalar_ty.floatBits(target); const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits); const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits); @@ -5897,16 
+5813,23 @@ pub const FuncGen = struct { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. param_type = try o.builder.vectorType(.normal, 2, .i64); - extended = self.builder.buildBitCast(extended, param_type.toLlvm(&o.builder), ""); + extended = try self.wip.cast(.bitcast, extended, param_type, ""); } const libc_fn = try self.getLibcFunction(fn_name, &.{param_type}, dest_llvm_ty); - const params = [1]*llvm.Value{extended}; - - return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); + const params = [1]*llvm.Value{extended.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); } - fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -5924,11 +5847,12 @@ pub const FuncGen = struct { if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag - if (dest_scalar_ty.isSignedInt(mod)) { - return self.builder.buildFPToSI(operand, dest_llvm_ty.toLlvm(&o.builder), ""); - } else { - return self.builder.buildFPToUI(operand, dest_llvm_ty.toLlvm(&o.builder), ""); - } + return self.wip.conv( + if (dest_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + dest_llvm_ty, + "", + ); } const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); @@ -5953,66 +5877,69 @@ pub const FuncGen = struct { const operand_llvm_ty = try o.lowerType(operand_ty); const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, libc_ret_ty); - const params = [1]*llvm.Value{operand}; - - var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + var result = (try self.wip.unimplemented(libc_ret_ty, "")).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); - if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty.toLlvm(&o.builder), ""); - if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty.toLlvm(&o.builder), ""); + if (libc_ret_ty != ret_ty) result = try self.wip.cast(.bitcast, result, ret_ty, ""); + if (ret_ty != dest_llvm_ty) result = try self.wip.cast(.trunc, result, dest_llvm_ty, ""); return result; } - fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { + fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; - if (ty.isSlice(mod)) { - return fg.builder.buildExtractValue(ptr, 0, ""); - } else { - return ptr; - } + return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } - fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) Allocator.Error!*llvm.Value { + fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { - const 
len = fg.builder.buildExtractValue(ptr, 1, ""); + const len = try fg.wip.extractValue(ptr, &.{1}, ""); const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; - const abi_size_llvm_val = try o.builder.intConst(llvm_usize, abi_size); - return fg.builder.buildMul(len, abi_size_llvm_val.toLlvm(&o.builder), ""); + const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size); + return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, ""); }, .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return (try o.builder.intConst(llvm_usize, array_ty.arrayLen(mod) * abi_size)).toLlvm(&o.builder); + return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size); }, .Many, .C => unreachable, } } - fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: u32) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return self.builder.buildExtractValue(operand, index, ""); + return self.wip.extractValue(operand, &.{index}, ""); } - fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); - const slice_llvm_ty = (try o.lowerPtrElemTy(slice_ptr_ty.childType(mod))).toLlvm(&o.builder); + const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod)); - return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); + return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, ""); } - fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6021,21 +5948,21 @@ pub const FuncGen = struct { const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(mod); - const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); - const base_ptr = self.builder.buildExtractValue(slice, 0, ""); - const indices: [1]*llvm.Value = .{index}; - const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); + const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); + const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); + const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); if (isByRef(elem_ty, mod)) { if (self.canElideLoad(body_tail)) return ptr; - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(ptr, elem_ty, elem_alignment, false); } return self.load(ptr, slice_ty); } - fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -6044,13 +5971,12 @@ pub const FuncGen = struct { const slice = try 
self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = (try o.lowerPtrElemTy(slice_ty.childType(mod))).toLlvm(&o.builder); - const base_ptr = self.builder.buildExtractValue(slice, 0, ""); - const indices: [1]*llvm.Value = .{index}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); + const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod)); + const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); } - fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6059,21 +5985,20 @@ pub const FuncGen = struct { const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const array_llvm_ty = (try o.lowerType(array_ty)).toLlvm(&o.builder); + const array_llvm_ty = try o.lowerType(array_ty); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - rhs, + const indices: [2]Builder.Value = .{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs, }; if (isByRef(elem_ty, mod)) { - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); - if (canElideLoad(self, body_tail)) - return elem_ptr; - - return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false); + const elem_ptr = + try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); + if (canElideLoad(self, body_tail)) return elem_ptr; + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(elem_ptr, elem_ty, elem_alignment, false); } else { - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); + const elem_llvm_ty = try o.lowerType(elem_ty); if (Air.refToIndex(bin_op.lhs)) |lhs_index| { if (self.air.instructions.items(.tag)[lhs_index] == .load) { const load_data = self.air.instructions.items(.data)[lhs_index]; @@ -6081,66 +6006,70 @@ pub const FuncGen = struct { if (Air.refToIndex(load_ptr)) |load_ptr_index| { const load_ptr_tag = self.air.instructions.items(.tag)[load_ptr_index]; switch (load_ptr_tag) { - .struct_field_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3 => { + .struct_field_ptr, + .struct_field_ptr_index_0, + .struct_field_ptr_index_1, + .struct_field_ptr_index_2, + .struct_field_ptr_index_3, + => { const load_ptr_inst = try self.resolveInst(load_ptr); - const gep = self.builder.buildInBoundsGEP(array_llvm_ty, load_ptr_inst, &indices, indices.len, ""); - return self.builder.buildLoad(elem_llvm_ty, gep, ""); + const gep = try self.wip.gep( + .inbounds, + array_llvm_ty, + load_ptr_inst, + &indices, + "", + ); + return self.wip.load(.normal, elem_llvm_ty, gep, .default, ""); }, else => {}, } } } } - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); - return self.builder.buildLoad(elem_llvm_ty, elem_ptr, ""); + const elem_ptr = + try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); + return self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, ""); } } // This branch can be reached for 
vectors, which are always by-value. - return self.builder.buildExtractElement(array_llvm_val, rhs, ""); + return self.wip.extractElement(array_llvm_val, rhs, ""); } - fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); + const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch - const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { + const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) // If this is a single-item pointer to an array, we need another index in the GEP. - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - rhs, - }; - break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } else ptr: { - const indices: [1]*llvm.Value = .{rhs}; - break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - }; + &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } + else + &.{rhs}, ""); if (isByRef(elem_ty, mod)) { - if (self.canElideLoad(body_tail)) - return ptr; - - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); + if (self.canElideLoad(body_tail)) return ptr; + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(ptr, elem_ty, elem_alignment, false); } return self.load(ptr, ptr_ty); } - fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) - return (try o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -6148,21 +6077,15 @@ pub const FuncGen = struct { const elem_ptr = self.air.getRefType(ty_pl.ty); if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr; - const llvm_elem_ty = (try o.lowerPtrElemTy(elem_ty)).toLlvm(&o.builder); - if (ptr_ty.isSinglePointer(mod)) { + const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); + return try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) // If this is a single-item pointer to an array, we need another index in the GEP. 
- const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - rhs, - }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } else { - const indices: [1]*llvm.Value = .{rhs}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } + &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } + else + &.{rhs}, ""); } - fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); @@ -6174,14 +6097,14 @@ pub const FuncGen = struct { self: *FuncGen, inst: Air.Inst.Index, field_index: u32, - ) !?*llvm.Value { + ) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); const struct_ptr_ty = self.typeOf(ty_op.operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index); } - fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6191,9 +6114,7 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return null; - } + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (!isByRef(struct_ty, mod)) { assert(!isByRef(field_ty, mod)); @@ -6203,39 +6124,44 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(struct_ty).?; const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const containing_int = struct_llvm_val; - const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); - const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); + const shift_amt = + try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); + const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); + const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, 
shifted_value, same_size_int, ""); + return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); + return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, ""); }, else => { const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index; - return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, ""); + return self.wip.extractValue(struct_llvm_val, &.{llvm_field_index}, ""); }, }, .Union => { assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; - const elem_llvm_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); + const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, containing_int, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = (try o.builder.intType(@intCast(field_ty.bitSize(mod)))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, containing_int, same_size_int, ""); + return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(containing_int, elem_llvm_ty, ""); + return self.wip.cast(.trunc, containing_int, elem_llvm_ty, ""); }, else => unreachable, } @@ -6245,8 +6171,9 @@ pub const FuncGen = struct { .Struct => { assert(struct_ty.containerLayout(mod) != .Packed); const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); + const struct_llvm_ty = try o.lowerType(struct_ty); + const field_ptr = + try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), .flags = .{ @@ -6258,31 +6185,32 @@ pub const FuncGen = struct { return field_ptr; assert(llvm_field.alignment != 0); - return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false); + const field_alignment = Builder.Alignment.fromByteUnits(llvm_field.alignment); + return self.loadByRef(field_ptr, field_ty, field_alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } }, .Union => { - const union_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); + const union_llvm_ty = try o.lowerType(struct_ty); const layout = struct_ty.unionGetLayout(mod); const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); - const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); - const llvm_field_ty = (try o.lowerType(field_ty)).toLlvm(&o.builder); + const field_ptr = + try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, ""); + const llvm_field_ty = try o.lowerType(field_ty); + const 
payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align); if (isByRef(field_ty, mod)) { - if (canElideLoad(self, body_tail)) - return field_ptr; - - return self.loadByRef(field_ptr, field_ty, layout.payload_align, false); + if (canElideLoad(self, body_tail)) return field_ptr; + return self.loadByRef(field_ptr, field_ty, payload_alignment, false); } else { - return self.builder.buildLoad(llvm_field_ty, field_ptr, ""); + return self.wip.load(.normal, llvm_field_ty, field_ptr, payload_alignment, ""); } }, else => unreachable, } } - fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -6292,33 +6220,36 @@ pub const FuncGen = struct { const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + if (field_offset == 0) return field_ptr; - const res_ty = (try o.lowerType(self.air.getRefType(ty_pl.ty))).toLlvm(&o.builder); - if (field_offset == 0) { - return field_ptr; - } + const res_ty = try o.lowerType(self.air.getRefType(ty_pl.ty)); const llvm_usize = try o.lowerType(Type.usize); - const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize.toLlvm(&o.builder), ""); - const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, (try o.builder.intConst(llvm_usize, field_offset)).toLlvm(&o.builder), ""); - return self.builder.buildIntToPtr(base_ptr_int, res_ty, ""); + const field_ptr_int = try self.wip.cast(.ptrtoint, field_ptr, llvm_usize, ""); + const base_ptr_int = try self.wip.bin( + .@"sub nuw", + field_ptr_int, + try o.builder.intValue(llvm_usize, field_offset), + "", + ); + return self.wip.cast(.inttoptr, base_ptr_int, res_ty, ""); } - fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airNot(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return self.builder.buildNot(operand, ""); + return self.wip.not(operand, ""); } - fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = self.builder.buildUnreachable(); - return null; + _ = try self.wip.@"unreachable"(); + return .none; } - fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { - const di_scope = self.di_scope orelse return null; + fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const di_scope = self.di_scope orelse return .none; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1); self.prev_dbg_column = @intCast(dbg_stmt.column + 1); @@ -6327,12 +6258,12 @@ pub const FuncGen = struct { else null; self.builder.setCurrentDebugLocation(self.prev_dbg_line, self.prev_dbg_column, di_scope, inlined_at); - return null; + return .none; } - fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; @@ -6385,12 +6316,12 @@ pub const FuncGen = struct { const lexical_block = 
dib.createLexicalBlock(subprogram.toScope(), di_file, line_number, 1); self.di_scope = lexical_block.toScope(); self.base_line = decl.src_line; - return null; + return .none; } - fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - if (o.di_builder == null) return null; + if (o.di_builder == null) return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; @@ -6400,30 +6331,30 @@ pub const FuncGen = struct { const old = self.dbg_inlined.pop(); self.di_scope = old.scope; self.base_line = old.base_line; - return null; + return .none; } - fn airDbgBlockBegin(self: *FuncGen) !?*llvm.Value { + fn airDbgBlockBegin(self: *FuncGen) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const old_scope = self.di_scope.?; try self.dbg_block_stack.append(self.gpa, old_scope); const lexical_block = dib.createLexicalBlock(old_scope, self.di_file.?, self.prev_dbg_line, self.prev_dbg_column); self.di_scope = lexical_block.toScope(); - return null; + return .none; } - fn airDbgBlockEnd(self: *FuncGen) !?*llvm.Value { + fn airDbgBlockEnd(self: *FuncGen) !Builder.Value { const o = self.dg.object; - if (o.di_builder == null) return null; + if (o.di_builder == null) return .none; self.di_scope = self.dbg_block_stack.pop(); - return null; + return .none; } - fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -6443,22 +6374,20 @@ pub const FuncGen = struct { else null; const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at); - const insert_block = self.builder.getInsertBlock(); - _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); - return null; + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); + _ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); + return .none; } - fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const operand_ty = self.typeOf(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); - if (needDbgVarWorkaround(o)) { - return null; - } + if (needDbgVarWorkaround(o)) return .none; const di_local_var = dib.createAutoVariable( self.di_scope.?, @@ -6474,23 +6403,22 @@ pub const FuncGen = struct { else null; const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at); - const insert_block = self.builder.getInsertBlock(); + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); const mod = o.module; if (isByRef(operand_ty, mod)) { - _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); + _ = 
dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(mod); - const alloca = try self.buildAlloca(operand.typeOf(), alignment); - const store_inst = self.builder.buildStore(operand, alloca); - store_inst.setAlignment(alignment); - _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); + const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod)); + const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, operand, alloca, alignment); + _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else { - _ = dib.insertDbgValueIntrinsicAtEnd(operand, di_local_var, debug_loc, insert_block); + _ = dib.insertDbgValueIntrinsicAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } - return null; + return .none; } - fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { // Eventually, the Zig compiler needs to be reworked to have inline // assembly go through the same parsing code regardless of backend, and // have LLVM-flavored inline assembly be *output* from that assembler. @@ -6523,11 +6451,11 @@ pub const FuncGen = struct { const llvm_ret_indirect = try arena.alloc(bool, max_return_count); const max_param_count = inputs.len + outputs.len; - const llvm_param_types = try arena.alloc(*llvm.Type, max_param_count); + const llvm_param_types = try arena.alloc(Builder.Type, max_param_count); const llvm_param_values = try arena.alloc(*llvm.Value, max_param_count); // This stores whether we need to add an elementtype attribute and // if so, the element type itself. - const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count); + const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count); const mod = o.module; const target = mod.getTarget(); @@ -6564,9 +6492,9 @@ pub const FuncGen = struct { // Pass the result by reference as an indirect output (e.g. "=*m") llvm_constraints.appendAssumeCapacity('*'); - llvm_param_values[llvm_param_i] = output_inst; - llvm_param_types[llvm_param_i] = output_inst.typeOf(); - llvm_param_attrs[llvm_param_i] = elem_llvm_ty.toLlvm(&o.builder); + llvm_param_values[llvm_param_i] = output_inst.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = output_inst.typeOfWip(&self.wip); + llvm_param_attrs[llvm_param_i] = elem_llvm_ty; llvm_param_i += 1; } else { // Pass the result directly (e.g. 
"=r") @@ -6614,27 +6542,26 @@ pub const FuncGen = struct { if (isByRef(arg_ty, mod)) { llvm_elem_ty = try o.lowerPtrElemTy(arg_ty); if (constraintAllowsMemory(constraint)) { - llvm_param_values[llvm_param_i] = arg_llvm_value; - llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); + llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod); - const arg_llvm_ty = (try o.lowerType(arg_ty)).toLlvm(&o.builder); - const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, ""); - load_inst.setAlignment(alignment); - llvm_param_values[llvm_param_i] = load_inst; + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); + const arg_llvm_ty = try o.lowerType(arg_ty); + const load_inst = + try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, ""); + llvm_param_values[llvm_param_i] = load_inst.toLlvm(&self.wip); llvm_param_types[llvm_param_i] = arg_llvm_ty; } } else { if (constraintAllowsRegister(constraint)) { - llvm_param_values[llvm_param_i] = arg_llvm_value; - llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); + llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod); - const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOf(), alignment); - const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr); - store_inst.setAlignment(alignment); - llvm_param_values[llvm_param_i] = arg_ptr; - llvm_param_types[llvm_param_i] = arg_ptr.typeOf(); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); + const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment); + llvm_param_values[llvm_param_i] = arg_ptr.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_ptr.typeOfWip(&self.wip); } } @@ -6658,12 +6585,12 @@ pub const FuncGen = struct { // In the case of indirect inputs, LLVM requires the callsite to have // an elementtype() attribute. if (constraint[0] == '*') { - llvm_param_attrs[llvm_param_i] = (if (llvm_elem_ty != .none) + llvm_param_attrs[llvm_param_i] = if (llvm_elem_ty != .none) llvm_elem_ty else - try o.lowerPtrElemTy(arg_ty.childType(mod))).toLlvm(&o.builder); + try o.lowerPtrElemTy(arg_ty.childType(mod)); } else { - llvm_param_attrs[llvm_param_i] = null; + llvm_param_attrs[llvm_param_i] = .none; } llvm_param_i += 1; @@ -6786,14 +6713,9 @@ pub const FuncGen = struct { else => try o.builder.structType(.normal, llvm_ret_types), }; - const llvm_fn_ty = llvm.functionType( - ret_llvm_ty.toLlvm(&o.builder), - llvm_param_types.ptr, - @intCast(param_count), - .False, - ); + const llvm_fn_ty = try o.builder.fnType(ret_llvm_ty, llvm_param_types[0..param_count], .normal); const asm_fn = llvm.getInlineAsm( - llvm_fn_ty, + llvm_fn_ty.toLlvm(&o.builder), rendered_template.items.ptr, rendered_template.items.len, llvm_constraints.items.ptr, @@ -6803,18 +6725,18 @@ pub const FuncGen = struct { .ATT, .False, ); - const call = self.builder.buildCall( - llvm_fn_ty, + const call = (try self.wip.unimplemented(ret_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), asm_fn, llvm_param_values.ptr, @intCast(param_count), .C, .Auto, "", - ); + ), &self.wip); for (llvm_param_attrs[0..param_count], 0..) 
|llvm_elem_ty, i| { - if (llvm_elem_ty) |llvm_ty| { - llvm.setCallElemTypeAttr(call, i, llvm_ty); + if (llvm_elem_ty != .none) { + llvm.setCallElemTypeAttr(call.toLlvm(&self.wip), i, llvm_elem_ty.toLlvm(&o.builder)); } } @@ -6823,16 +6745,17 @@ pub const FuncGen = struct { for (outputs, 0..) |output, i| { if (llvm_ret_indirect[i]) continue; - const output_value = if (return_count > 1) b: { - break :b self.builder.buildExtractValue(call, @intCast(llvm_ret_i), ""); - } else call; + const output_value = if (return_count > 1) + try self.wip.extractValue(call, &[_]u32{@intCast(llvm_ret_i)}, "") + else + call; if (output != .none) { const output_ptr = try self.resolveInst(output); const output_ptr_ty = self.typeOf(output); - const store_inst = self.builder.buildStore(output_value, output_ptr); - store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(output_ptr_ty.ptrAlignment(mod)); + _ = try self.wip.store(.normal, output_value, output_ptr, alignment); } else { ret_val = output_value; } @@ -6846,8 +6769,8 @@ pub const FuncGen = struct { self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool, - pred: llvm.IntPredicate, - ) !?*llvm.Value { + cond: Builder.IntegerCondition, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -6858,45 +6781,40 @@ pub const FuncGen = struct { const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty.toLlvm(&o.builder), operand, "") + try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else operand; if (payload_ty.isSlice(mod)) { - const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); + const slice_ptr = try self.wip.extractValue(loaded, &.{0}, ""); const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace( payload_ty.ptrAddressSpace(mod), mod.getTarget(), )); - return self.builder.buildICmp(pred, slice_ptr, (try o.builder.nullConst(ptr_ty)).toLlvm(&o.builder), ""); + return self.wip.icmp(cond, slice_ptr, try o.builder.nullValue(ptr_ty), ""); } - return self.builder.buildICmp(pred, loaded, (try o.builder.zeroInitConst(optional_llvm_ty)).toLlvm(&o.builder), ""); + return self.wip.icmp(cond, loaded, try o.builder.zeroInitValue(optional_llvm_ty), ""); } comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty.toLlvm(&o.builder), operand, "") + try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else operand; - return self.builder.buildICmp(pred, loaded, (try o.builder.intConst(.i8, 0)).toLlvm(&o.builder), ""); + return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), ""); } const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); - const non_null_bit = try self.optIsNonNull(optional_llvm_ty.toLlvm(&o.builder), operand, is_by_ref); - if (pred == .EQ) { - return self.builder.buildNot(non_null_bit, ""); - } else { - return non_null_bit; - } + return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref); } fn airIsErr( self: *FuncGen, inst: Air.Inst.Index, - op: llvm.IntPredicate, + cond: Builder.IntegerCondition, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -6904,39 +6822,37 @@ pub const FuncGen = struct { const 
operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(mod); - const zero = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); + const zero = try o.builder.intValue(Builder.Type.err_int, 0); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const val: Builder.Constant = switch (op) { - .EQ => .true, // 0 == 0 - .NE => .false, // 0 != 0 + const val: Builder.Constant = switch (cond) { + .eq => .true, // 0 == 0 + .ne => .false, // 0 != 0 else => unreachable, }; - return val.toLlvm(&o.builder); + return val.toValue(); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad((try o.lowerType(err_union_ty)).toLlvm(&o.builder), operand, "") + try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "") else operand; - return self.builder.buildICmp(op, loaded, zero, ""); + return self.wip.icmp(cond, loaded, zero, ""); } const err_field_index = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); - const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); - const loaded = self.builder.buildLoad(Builder.Type.err_int.toLlvm(&o.builder), err_field_ptr, ""); - return self.builder.buildICmp(op, loaded, zero, ""); - } - - const loaded = self.builder.buildExtractValue(operand, err_field_index, ""); - return self.builder.buildICmp(op, loaded, zero, ""); + const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: { + const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_field_ptr = + try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, ""); + break :loaded try self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, ""); + } else try self.wip.extractValue(operand, &.{err_field_index}, ""); + return self.wip.icmp(cond, loaded, zero, ""); } - fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -6952,11 +6868,10 @@ pub const FuncGen = struct { // The payload and the optional are the same value. return operand; } - const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); - return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); + return self.wip.gepStruct(try o.lowerType(optional_ty), operand, 0, ""); } - fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { comptime assert(optional_layout_version == 3); const o = self.dg.object; @@ -6965,10 +6880,10 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - const non_null_bit = (try o.builder.intConst(.i8, 1)).toLlvm(&o.builder); + const non_null_bit = try o.builder.intValue(.i8, 1); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
- _ = self.builder.buildStore(non_null_bit, operand); + _ = try self.wip.store(.normal, non_null_bit, operand, .default); return operand; } if (optional_ty.optionalReprIsPayload(mod)) { @@ -6978,19 +6893,18 @@ pub const FuncGen = struct { } // First set the non-null bit. - const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); - const non_null_ptr = self.builder.buildStructGEP(optional_llvm_ty, operand, 1, ""); + const optional_llvm_ty = try o.lowerType(optional_ty); + const non_null_ptr = try self.wip.gepStruct(optional_llvm_ty, operand, 1, ""); // TODO set alignment on this store - _ = self.builder.buildStore(non_null_bit, non_null_ptr); + _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); // Then return the payload pointer (only if it's used). - if (self.liveness.isUnused(inst)) - return null; + if (self.liveness.isUnused(inst)) return .none; - return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); + return self.wip.gepStruct(optional_llvm_ty, operand, 0, ""); } - fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6998,14 +6912,14 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. return operand; } - const opt_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); + const opt_llvm_ty = try o.lowerType(optional_ty); const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -7014,7 +6928,7 @@ pub const FuncGen = struct { self: *FuncGen, body_tail: []const Air.Inst.Index, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -7026,32 +6940,30 @@ pub const FuncGen = struct { const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return if (operand_is_ptr) operand else null; + return if (operand_is_ptr) operand else .none; } const offset = errUnionPayloadOffset(payload_ty, mod); - const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); + const err_union_llvm_ty = try o.lowerType(err_union_ty); if (operand_is_ptr) { - return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); + return self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); } else if (isByRef(err_union_ty, mod)) { - const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); + const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); if (isByRef(payload_ty, mod)) { - if (self.canElideLoad(body_tail)) - return payload_ptr; - - return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); + if (self.canElideLoad(body_tail)) return payload_ptr; + return self.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - 
const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(mod)); - return load_inst; + const payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; + return self.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); } - return self.builder.buildExtractValue(operand, offset, ""); + return self.wip.extractValue(operand, &.{offset}, ""); } fn airErrUnionErr( self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -7062,30 +6974,28 @@ pub const FuncGen = struct { if (operand_is_ptr) { return operand; } else { - return (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); + return o.builder.intValue(Builder.Type.err_int, 0); } } - const err_set_llvm_ty = (try o.lowerType(Type.anyerror)).toLlvm(&o.builder); - const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!operand_is_ptr) return operand; - return self.builder.buildLoad(err_set_llvm_ty, operand, ""); + return self.wip.load(.normal, Builder.Type.err_int, operand, .default, ""); } const offset = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); - const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); - return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); + const err_union_llvm_ty = try o.lowerType(err_union_ty); + const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); + return self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, ""); } - return self.builder.buildExtractValue(operand, offset, ""); + return self.wip.extractValue(operand, &.{offset}, ""); } - fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -7093,49 +7003,49 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); - const non_error_val = try o.lowerValue((try mod.intValue(Type.err_int, 0)).toIntern()); + const non_error_val = try o.builder.intValue(Builder.Type.err_int, 0); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - _ = self.builder.buildStore(non_error_val.toLlvm(&o.builder), operand); + _ = try self.wip.store(.normal, non_error_val, operand, .default); return operand; } - const err_union_llvm_ty = (try o.lowerType(err_union_ty)).toLlvm(&o.builder); + const err_union_llvm_ty = try o.lowerType(err_union_ty); { + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. 
- const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); - const store_inst = self.builder.buildStore(non_error_val.toLlvm(&o.builder), non_null_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); + const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, ""); + _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment); } // Then return the payload pointer (only if it is used). - if (self.liveness.isUnused(inst)) - return null; + if (self.liveness.isUnused(inst)) return .none; const payload_offset = errUnionPayloadOffset(payload_ty, mod); - return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, ""); + return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, ""); } - fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !?*llvm.Value { - return self.err_ret_trace.?; + fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !Builder.Value { + assert(self.err_ret_trace != .none); + return self.err_ret_trace; } - fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - self.err_ret_trace = operand; - return null; + self.err_ret_trace = try self.resolveInst(un_op); + return .none; } - fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - //const struct_ty = try self.resolveInst(ty_pl.ty); const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; const mod = o.module; const llvm_field = llvmField(struct_ty, field_index, mod).?; - const struct_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); + const struct_llvm_ty = try o.lowerType(struct_ty); + assert(self.err_ret_trace != .none); + const field_ptr = + try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), .flags = .{ @@ -7145,34 +7055,32 @@ pub const FuncGen = struct { return self.load(field_ptr, field_ptr_ty); } - fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.typeOf(ty_op.operand); - const non_null_bit = (try o.builder.intConst(.i8, 1)).toLlvm(&o.builder); + const non_null_bit = try o.builder.intValue(.i8, 1); comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload(mod)) { - return operand; - } - const llvm_optional_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); + if (optional_ty.optionalReprIsPayload(mod)) return operand; + const llvm_optional_ty = try o.lowerType(optional_ty); if (isByRef(optional_ty, mod)) { - const optional_ptr = try self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); - const 
payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); + const alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod)); + const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment); + const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); - try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); - const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); - _ = self.builder.buildStore(non_null_bit, non_null_ptr); + try self.store(payload_ptr, payload_ptr_ty, operand, .none); + const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, ""); + _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); return optional_ptr; } - const partial = self.builder.buildInsertValue(llvm_optional_ty.getUndef(), operand, 0, ""); - return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); + return self.wip.buildAggregate(llvm_optional_ty, &.{ operand, non_null_bit }, ""); } - fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -7182,46 +7090,47 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const ok_err_code = (try o.builder.intConst(Builder.Type.err_int, 0)).toLlvm(&o.builder); - const err_un_llvm_ty = (try o.lowerType(err_un_ty)).toLlvm(&o.builder); + const ok_err_code = try o.builder.intValue(Builder.Type.err_int, 0); + const err_un_llvm_ty = try o.lowerType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = try self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); - const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); - const store_inst = self.builder.buildStore(ok_err_code, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); - const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); + const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment); + const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); + _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment); + const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); - try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); + try self.store(payload_ptr, payload_ptr_ty, operand, .none); return result_ptr; } - - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, error_offset, ""); - return self.builder.buildInsertValue(partial, operand, payload_offset, ""); + var fields: [2]Builder.Value = undefined; + fields[payload_offset] = operand; + fields[error_offset] = ok_err_code; + return self.wip.buildAggregate(err_un_llvm_ty, &fields, ""); } - fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapErrUnionErr(self: *FuncGen, 
inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return operand; - } - const err_un_llvm_ty = (try o.lowerType(err_un_ty)).toLlvm(&o.builder); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand; + const err_un_llvm_ty = try o.lowerType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = try self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); - const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); - const store_inst = self.builder.buildStore(operand, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); - const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); + const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment); + const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); + _ = try self.wip.store(.normal, operand, err_ptr, error_alignment); + const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; @@ -7229,12 +7138,12 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, error_offset, ""); // TODO set payload bytes to undef - return partial; + const undef = try o.builder.undefValue(err_un_llvm_ty); + return self.wip.insertValue(undef, operand, &.{error_offset}, ""); } - fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; @@ -7242,10 +7151,18 @@ pub const FuncGen = struct { const args: [1]*llvm.Value = .{ (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), }; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{.i32}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; @@ -7253,12 +7170,20 @@ pub const FuncGen = struct { const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32}); const args: [2]*llvm.Value = .{ (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), - operand, + operand.toLlvm(&self.wip), }; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + return (try self.wip.unimplemented(.i32, 
"")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{ .i32, .i32 }, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const data = self.air.instructions.items(.data)[inst].vector_store_elem; @@ -7269,19 +7194,20 @@ pub const FuncGen = struct { const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); - const loaded_vector = blk: { - const elem_llvm_ty = (try o.lowerType(vector_ptr_ty.childType(mod))).toLlvm(&o.builder); - const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); - load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); - break :blk load_inst; + const kind: Builder.MemoryAccessKind = switch (vector_ptr_ty.isVolatilePtr(mod)) { + false => .normal, + true => .@"volatile", }; - const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); - try self.store(vector_ptr, vector_ptr_ty, modified_vector, .NotAtomic); - return null; + const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); + const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod)); + const loaded = try self.wip.load(kind, elem_llvm_ty, vector_ptr, alignment, ""); + + const new_vector = try self.wip.insertElement(loaded, operand, index, ""); + _ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none); + return .none; } - fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7290,11 +7216,13 @@ pub const FuncGen = struct { const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); - return self.builder.buildUMin(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smin." + else + .@"llvm.umin.", lhs, rhs, ""); } - fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7303,26 +7231,23 @@ pub const FuncGen = struct { const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); - return self.builder.buildUMax(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smax." 
+ else + .@"llvm.umax.", lhs, rhs, ""); } - fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const llvm_slice_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); - - // In case of slicing a global, the result type looks something like `{ i8*, i64 }` - // but `ptr` is pointing to the global directly. - const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); + return self.wip.buildAggregate(try o.lowerType(inst_ty), &.{ ptr, len }, ""); } - fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7334,8 +7259,7 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, ""); - return self.builder.buildNUWAdd(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, ""); } fn airSafeArithmetic( @@ -7343,7 +7267,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, - ) !?*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; @@ -7358,44 +7282,51 @@ pub const FuncGen = struct { true => signed_intrinsic, false => unsigned_intrinsic, }; - const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{try o.lowerType(inst_ty)}); - const result_struct = fg.builder.buildCall( - llvm_fn.globalGetValueType(), + const llvm_inst_ty = try o.lowerType(inst_ty); + const llvm_ret_ty = try o.builder.structType(.normal, &.{ + llvm_inst_ty, + try llvm_inst_ty.changeScalar(.i1, &o.builder), + }); + const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_inst_ty, llvm_inst_ty }, .normal); + const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); + const result_struct = (try fg.wip.unimplemented(llvm_ret_ty, "")).finish(fg.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), llvm_fn, - &[_]*llvm.Value{ lhs, rhs }, + &[_]*llvm.Value{ lhs.toLlvm(&fg.wip), rhs.toLlvm(&fg.wip) }, 2, .Fast, .Auto, "", - ); - const overflow_bit = fg.builder.buildExtractValue(result_struct, 1, ""); + ), &fg.wip); + const overflow_bit = try fg.wip.extractValue(result_struct, &.{1}, ""); const scalar_overflow_bit = switch (is_scalar) { true => overflow_bit, - false => fg.builder.buildOrReduce(overflow_bit), + false => (try fg.wip.unimplemented(.i1, "")).finish( + fg.builder.buildOrReduce(overflow_bit.toLlvm(&fg.wip)), + &fg.wip, + ), }; - const fail_block = try fg.wip.block("OverflowFail"); - const ok_block = try fg.wip.block("OverflowOk"); - _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block.toLlvm(&fg.wip), ok_block.toLlvm(&fg.wip)); + const fail_block = try fg.wip.block(1, "OverflowFail"); + const ok_block = try fg.wip.block(1, "OverflowOk"); + _ = try fg.wip.brCond(scalar_overflow_bit, fail_block, ok_block); fg.wip.cursor = .{ .block = fail_block }; - 
fg.builder.positionBuilderAtEnd(fail_block.toLlvm(&fg.wip)); try fg.buildSimplePanic(.integer_overflow); fg.wip.cursor = .{ .block = ok_block }; - fg.builder.positionBuilderAtEnd(ok_block.toLlvm(&fg.wip)); - return fg.builder.buildExtractValue(result_struct, 0, ""); + return fg.wip.extractValue(result_struct, &.{0}, ""); } - fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildAdd(lhs, rhs, ""); + return self.wip.bin(.add, lhs, rhs, ""); } - fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7405,12 +7336,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, ""); - - return self.builder.buildUAddSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.sadd.sat." + else + .@"llvm.uadd.sat.", lhs, rhs, ""); } - fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7422,19 +7354,18 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, ""); - return self.builder.buildNUWSub(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, ""); } - fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildSub(lhs, rhs, ""); + return self.wip.bin(.sub, lhs, rhs, ""); } - fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7444,11 +7375,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, ""); - return self.builder.buildUSubSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.ssub.sat." 
+ else + .@"llvm.usub.sat.", lhs, rhs, ""); } - fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7460,19 +7393,18 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, ""); - return self.builder.buildNUWMul(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, ""); } - fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildMul(lhs, rhs, ""); + return self.wip.bin(.mul, lhs, rhs, ""); } - fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7482,11 +7414,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, ""); - return self.builder.buildUMulFixSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smul.fix.sat." + else + .@"llvm.umul.fix.sat.", lhs, rhs, ""); } - fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7497,7 +7431,7 @@ pub const FuncGen = struct { return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); } - fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7512,11 +7446,10 @@ pub const FuncGen = struct { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.trunc, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, ""); - return self.builder.buildUDiv(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, ""); } - fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7533,24 +7466,24 @@ pub const FuncGen = struct { } if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try o.lowerType(inst_ty); - const bit_size_minus_one = try o.builder.splatConst(inst_llvm_ty, try o.builder.intConst( + const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst( inst_llvm_ty.scalarType(&o.builder), inst_llvm_ty.scalarBits(&o.builder) - 1, )); - const div = self.builder.buildSDiv(lhs, rhs, ""); - 
const rem = self.builder.buildSRem(lhs, rhs, ""); - const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one.toLlvm(&o.builder), ""); - const zero = try o.builder.zeroInitConst(inst_llvm_ty); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero.toLlvm(&o.builder), ""); - const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero.toLlvm(&o.builder), ""); - return self.builder.buildNSWAdd(div, correction, ""); + const div = try self.wip.bin(.sdiv, lhs, rhs, ""); + const rem = try self.wip.bin(.srem, lhs, rhs, ""); + const div_sign = try self.wip.bin(.xor, lhs, rhs, ""); + const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, ""); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); + const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); + const correction = try self.wip.select(rem_nonzero, div_sign_mask, zero, ""); + return self.wip.bin(.@"add nsw", div, correction, ""); } - return self.builder.buildUDiv(lhs, rhs, ""); + return self.wip.bin(.udiv, lhs, rhs, ""); } - fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7562,11 +7495,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, ""); - return self.builder.buildExactUDiv(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"sdiv exact" + else + .@"udiv exact", lhs, rhs, ""); } - fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7578,11 +7513,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, ""); - return self.builder.buildURem(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .srem + else + .urem, lhs, rhs, ""); } - fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7598,29 +7535,29 @@ pub const FuncGen = struct { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs }); const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs }); - const zero = try o.builder.zeroInitConst(inst_llvm_ty); - const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero.toLlvm(&o.builder) }); - return self.builder.buildSelect(ltz, c, a, ""); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); + const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); + return self.wip.select(ltz, c, a, ""); } if (scalar_ty.isSignedInt(mod)) { - const bit_size_minus_one = try o.builder.splatConst(inst_llvm_ty, try o.builder.intConst( + const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst( 
inst_llvm_ty.scalarType(&o.builder), inst_llvm_ty.scalarBits(&o.builder) - 1, )); - const rem = self.builder.buildSRem(lhs, rhs, ""); - const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one.toLlvm(&o.builder), ""); - const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, ""); - const zero = try o.builder.zeroInitConst(inst_llvm_ty); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero.toLlvm(&o.builder), ""); - const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero.toLlvm(&o.builder), ""); - return self.builder.buildNSWAdd(rem, correction, ""); + const rem = try self.wip.bin(.srem, lhs, rhs, ""); + const div_sign = try self.wip.bin(.xor, lhs, rhs, ""); + const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, ""); + const rhs_masked = try self.wip.bin(.@"and", rhs, div_sign_mask, ""); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); + const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); + const correction = try self.wip.select(rem_nonzero, rhs_masked, zero, ""); + return self.wip.bin(.@"add nsw", rem, correction, ""); } - return self.builder.buildURem(lhs, rhs, ""); + return self.wip.bin(.urem, lhs, rhs, ""); } - fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -7628,55 +7565,39 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = (try o.lowerPtrElemTy(ptr_ty.childType(mod))).toLlvm(&o.builder); + const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); switch (ptr_ty.ptrSize(mod)) { - .One => { - // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - offset, - }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, - .C, .Many => { - const indices: [1]*llvm.Value = .{offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, + // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
+ .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), offset, + }, ""), + .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{offset}, ""), .Slice => { - const base = self.builder.buildExtractValue(ptr, 0, ""); - const indices: [1]*llvm.Value = .{offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, ""); + const base = try self.wip.extractValue(ptr, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{offset}, ""); }, } } - fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); - const negative_offset = self.builder.buildNeg(offset, ""); + const negative_offset = try self.wip.neg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = (try o.lowerPtrElemTy(ptr_ty.childType(mod))).toLlvm(&o.builder); + const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); switch (ptr_ty.ptrSize(mod)) { - .One => { - // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - negative_offset, - }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, - .C, .Many => { - const indices: [1]*llvm.Value = .{negative_offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, + // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
+ .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), negative_offset, + }, ""), + .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{negative_offset}, ""), .Slice => { - const base = self.builder.buildExtractValue(ptr, 0, ""); - const indices: [1]*llvm.Value = .{negative_offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, ""); + const base = try self.wip.extractValue(ptr, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{negative_offset}, ""); }, } } @@ -7686,7 +7607,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -7701,59 +7622,91 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; - const llvm_dest_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const llvm_dest_ty = try o.lowerType(dest_ty); + const llvm_lhs_ty = try o.lowerType(lhs_ty); - const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{try o.lowerType(lhs_ty)}); - const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); + const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); + const llvm_ret_ty = try o.builder.structType( + .normal, + &.{ llvm_lhs_ty, try llvm_lhs_ty.changeScalar(.i1, &o.builder) }, + ); + const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_lhs_ty, llvm_lhs_ty }, .normal); + const result_struct = (try self.wip.unimplemented(llvm_ret_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &[_]*llvm.Value{ lhs.toLlvm(&self.wip), rhs.toLlvm(&self.wip) }, + 2, + .Fast, + .Auto, + "", + ), + &self.wip, + ); - const result = self.builder.buildExtractValue(result_struct, 0, ""); - const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); + const result = try self.wip.extractValue(result_struct, &.{0}, ""); + const overflow_bit = try self.wip.extractValue(result_struct, &.{1}, ""); const result_index = llvmField(dest_ty, 0, mod).?.index; const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod); + const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod)); const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); - const store_inst = self.builder.buildStore(result, field_ptr); - store_inst.setAlignment(result_alignment); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); + _ = try self.wip.store(.normal, result, field_ptr, result_alignment); } { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, ""); - const store_inst = self.builder.buildStore(overflow_bit, field_ptr); - store_inst.setAlignment(1); + const overflow_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, ""); + _ = try self.wip.store(.normal, overflow_bit, field_ptr, overflow_alignment); } return alloca_inst; } - const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), 
result, result_index, ""); - return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, ""); + var fields: [2]Builder.Value = undefined; + fields[result_index] = result; + fields[overflow_index] = overflow_bit; + return self.wip.buildAggregate(llvm_dest_ty, &fields, ""); } fn buildElementwiseCall( self: *FuncGen, - llvm_fn: *llvm.Value, - args_vectors: []const *llvm.Value, - result_vector: *llvm.Value, + llvm_fn: Builder.Function.Index, + args_vectors: []const Builder.Value, + result_vector: Builder.Value, vector_len: usize, - ) !*llvm.Value { + ) !Builder.Value { const o = self.dg.object; assert(args_vectors.len <= 3); + const llvm_fn_ty = llvm_fn.typeOf(&o.builder); + const llvm_scalar_ty = llvm_fn_ty.functionReturn(&o.builder); + var i: usize = 0; var result = result_vector; while (i < vector_len) : (i += 1) { - const index_i32 = (try o.builder.intConst(.i32, i)).toLlvm(&o.builder); + const index_i32 = try o.builder.intValue(.i32, i); var args: [3]*llvm.Value = undefined; - for (args_vectors, 0..) |arg_vector, k| { - args[k] = self.builder.buildExtractElement(arg_vector, index_i32, ""); + for (args[0..args_vectors.len], args_vectors) |*arg_elem, arg_vector| { + arg_elem.* = (try self.wip.extractElement(arg_vector, index_i32, "")).toLlvm(&self.wip); } - const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, @intCast(args_vectors.len), .C, .Auto, ""); - result = self.builder.buildInsertElement(result, result_elem, index_i32, ""); + const result_elem = (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &args, + @intCast(args_vectors.len), + .C, + .Auto, + "", + ), + &self.wip, + ); + result = try self.wip.insertElement(result, result_elem, index_i32, ""); } return result; } @@ -7763,29 +7716,29 @@ pub const FuncGen = struct { fn_name: Builder.String, param_types: []const Builder.Type, return_type: Builder.Type, - ) Allocator.Error!*llvm.Value { + ) Allocator.Error!Builder.Function.Index { const o = self.dg.object; - const slice = fn_name.toSlice(&o.builder).?; - return o.llvm_module.getNamedFunction(slice) orelse b: { - const alias = o.llvm_module.getNamedGlobalAlias(slice.ptr, slice.len); - break :b if (alias) |a| a.getAliasee() else null; - } orelse b: { - const fn_type = try o.builder.fnType(return_type, param_types, .normal); - const f = o.llvm_module.addFunction(slice, fn_type.toLlvm(&o.builder)); - - var global = Builder.Global{ - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - }; + if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) { + .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function, + .function => |function| function, + else => unreachable, + }; - try o.builder.llvm.globals.append(self.gpa, f); - _ = try o.builder.addGlobal(fn_name, global); - try o.builder.functions.append(self.gpa, function); - break :b f; + const fn_type = try o.builder.fnType(return_type, param_types, .normal); + const f = o.llvm_module.addFunction(fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); + + var global = Builder.Global{ + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), }; + + try 
o.builder.llvm.globals.append(self.gpa, f); + _ = try o.builder.addGlobal(fn_name, global); + try o.builder.functions.append(self.gpa, function); + return global.kind.function; } /// Creates a floating point comparison by lowering to the appropriate @@ -7794,8 +7747,8 @@ pub const FuncGen = struct { self: *FuncGen, pred: math.CompareOperator, ty: Type, - params: [2]*llvm.Value, - ) !*llvm.Value { + params: [2]Builder.Value, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const target = o.module.getTarget(); @@ -7803,15 +7756,15 @@ pub const FuncGen = struct { const scalar_llvm_ty = try o.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { - const llvm_predicate: llvm.RealPredicate = switch (pred) { - .eq => .OEQ, - .neq => .UNE, - .lt => .OLT, - .lte => .OLE, - .gt => .OGT, - .gte => .OGE, + const cond: Builder.FloatCondition = switch (pred) { + .eq => .oeq, + .neq => .une, + .lt => .olt, + .lte => .ole, + .gt => .ogt, + .gte => .oge, }; - return self.builder.buildFCmp(llvm_predicate, params[0], params[1], ""); + return self.wip.fcmp(cond, params[0], params[1], ""); } const float_bits = scalar_ty.floatBits(target); @@ -7832,29 +7785,42 @@ pub const FuncGen = struct { .i32, ); - const zero = (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder); - const int_pred: llvm.IntPredicate = switch (pred) { - .eq => .EQ, - .neq => .NE, - .lt => .SLT, - .lte => .SLE, - .gt => .SGT, - .gte => .SGE, + const zero = try o.builder.intConst(.i32, 0); + const int_cond: Builder.IntegerCondition = switch (pred) { + .eq => .eq, + .neq => .ne, + .lt => .slt, + .lte => .sle, + .gt => .sgt, + .gte => .sge, }; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); - const vector_result_ty = (try o.builder.vectorType(.normal, vec_len, .i32)).toLlvm(&o.builder); + const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32); - var result = vector_result_ty.getUndef(); - result = try self.buildElementwiseCall(libc_fn, ¶ms, result, vec_len); + const init = try o.builder.poisonValue(vector_result_ty); + const result = try self.buildElementwiseCall(libc_fn, ¶ms, init, vec_len); - const zero_vector = self.builder.buildVectorSplat(vec_len, zero, ""); - return self.builder.buildICmp(int_pred, result, zero_vector, ""); + const zero_vector = try o.builder.splatValue(vector_result_ty, zero); + return self.wip.icmp(int_cond, result, zero_vector, ""); } - const result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); - return self.builder.buildICmp(int_pred, result, zero, ""); + const llvm_fn_ty = libc_fn.typeOf(&o.builder); + const llvm_params = [2]*llvm.Value{ params[0].toLlvm(&self.wip), params[1].toLlvm(&self.wip) }; + const result = (try self.wip.unimplemented( + llvm_fn_ty.functionReturn(&o.builder), + "", + )).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + &llvm_params, + llvm_params.len, + .C, + .Auto, + "", + ), &self.wip); + return self.wip.icmp(int_cond, result, zero.toValue(), ""); } const FloatOp = enum { @@ -7896,26 +7862,25 @@ pub const FuncGen = struct { comptime op: FloatOp, ty: Type, comptime params_len: usize, - params: [params_len]*llvm.Value, - ) !*llvm.Value { + params: [params_len]Builder.Value, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const llvm_ty = try o.lowerType(ty); - const scalar_llvm_ty = try o.lowerType(scalar_ty); const 
intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); const strat: FloatOpStrat = if (intrinsics_allowed) switch (op) { // Some operations are dedicated LLVM instructions, not available as intrinsics - .neg => return self.builder.buildFNeg(params[0], ""), - .add => return self.builder.buildFAdd(params[0], params[1], ""), - .sub => return self.builder.buildFSub(params[0], params[1], ""), - .mul => return self.builder.buildFMul(params[0], params[1], ""), - .div => return self.builder.buildFDiv(params[0], params[1], ""), - .fmod => return self.builder.buildFRem(params[0], params[1], ""), - .fmax => return self.builder.buildMaxNum(params[0], params[1], ""), - .fmin => return self.builder.buildMinNum(params[0], params[1], ""), + .neg => return self.wip.un(.fneg, params[0], ""), + .add => return self.wip.bin(.fadd, params[0], params[1], ""), + .sub => return self.wip.bin(.fsub, params[0], params[1], ""), + .mul => return self.wip.bin(.fmul, params[0], params[1], ""), + .div => return self.wip.bin(.fdiv, params[0], params[1], ""), + .fmod => return self.wip.bin(.frem, params[0], params[1], ""), + .fmax => return self.wip.bin(.@"llvm.maxnum.", params[0], params[1], ""), + .fmin => return self.wip.bin(.@"llvm.minnum.", params[0], params[1], ""), else => .{ .intrinsic = "llvm." ++ @tagName(op) }, } else b: { const float_bits = scalar_ty.floatBits(target); @@ -7924,19 +7889,14 @@ pub const FuncGen = struct { // In this case we can generate a softfloat negation by XORing the // bits with a constant. const int_ty = try o.builder.intType(@intCast(float_bits)); - const one = try o.builder.intConst(int_ty, 1); - const shift_amt = try o.builder.intConst(int_ty, float_bits - 1); - const sign_mask = try o.builder.binConst(.shl, one, shift_amt); - const result = if (ty.zigTypeTag(mod) == .Vector) blk: { - const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask.toLlvm(&o.builder), ""); - const cast_ty = try o.builder.vectorType(.normal, ty.vectorLen(mod), int_ty); - const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty.toLlvm(&o.builder), ""); - break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); - } else blk: { - const bitcasted_operand = self.builder.buildBitCast(params[0], int_ty.toLlvm(&o.builder), ""); - break :blk self.builder.buildXor(bitcasted_operand, sign_mask.toLlvm(&o.builder), ""); - }; - return self.builder.buildBitCast(result, llvm_ty.toLlvm(&o.builder), ""); + const cast_ty = try llvm_ty.changeScalar(int_ty, &o.builder); + const sign_mask = try o.builder.splatValue( + cast_ty, + try o.builder.intConst(int_ty, @as(u128, 1) << @intCast(float_bits - 1)), + ); + const bitcasted_operand = try self.wip.cast(.bitcast, params[0], cast_ty, ""); + const result = try self.wip.bin(.xor, bitcasted_operand, sign_mask, ""); + return self.wip.cast(.bitcast, result, llvm_ty, ""); }, .add, .sub, .div, .mul => .{ .libc = try o.builder.fmt("__{s}{s}f3", .{ @tagName(op), compilerRtFloatAbbrev(float_bits), @@ -7965,26 +7925,42 @@ pub const FuncGen = struct { }; }; - const llvm_fn: *llvm.Value = switch (strat) { + const llvm_fn = switch (strat) { .intrinsic => |fn_name| try self.getIntrinsic(fn_name, &.{llvm_ty}), .libc => |fn_name| b: { + const scalar_llvm_ty = llvm_ty.scalarType(&o.builder); const libc_fn = try self.getLibcFunction( fn_name, ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len], scalar_llvm_ty, ); if (ty.zigTypeTag(mod) == .Vector) { - const result = llvm_ty.toLlvm(&o.builder).getUndef(); + const result = try 
o.builder.poisonValue(llvm_ty); return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen(mod)); } - break :b libc_fn; + break :b libc_fn.toLlvm(&o.builder); }, }; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params_len, .C, .Auto, ""); + const llvm_fn_ty = try o.builder.fnType( + llvm_ty, + ([1]Builder.Type{llvm_ty} ** 3)[0..params.len], + .normal, + ); + var llvm_params: [params_len]*llvm.Value = undefined; + for (&llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(&self.wip); + return (try self.wip.unimplemented(llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &llvm_params, + params_len, + .C, + .Auto, + "", + ), &self.wip); } - fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; @@ -7996,7 +7972,7 @@ pub const FuncGen = struct { return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend }); } - fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -8006,72 +7982,67 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.typeOfIndex(inst); - const llvm_dest_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const llvm_dest_ty = try o.lowerType(dest_ty); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); - const result = self.builder.buildShl(lhs, casted_rhs, ""); - const reconstructed = if (lhs_scalar_ty.isSignedInt(mod)) - self.builder.buildAShr(result, casted_rhs, "") + const result = try self.wip.bin(.shl, lhs, casted_rhs, ""); + const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .ashr else - self.builder.buildLShr(result, casted_rhs, ""); + .lshr, result, casted_rhs, ""); - const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, ""); + const overflow_bit = try self.wip.icmp(.ne, lhs, reconstructed, ""); const result_index = llvmField(dest_ty, 0, mod).?.index; const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod); + const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod)); const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); - const store_inst = self.builder.buildStore(result, field_ptr); - store_inst.setAlignment(result_alignment); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); + _ = try self.wip.store(.normal, result, field_ptr, result_alignment); } { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, ""); - const store_inst = self.builder.buildStore(overflow_bit, field_ptr); - 
store_inst.setAlignment(1); + const field_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, ""); + _ = try self.wip.store(.normal, overflow_bit, field_ptr, field_alignment); } - return alloca_inst; } - const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), result, result_index, ""); - return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, ""); + var fields: [2]Builder.Value = undefined; + fields[result_index] = result; + fields[overflow_index] = overflow_bit; + return self.wip.buildAggregate(llvm_dest_ty, &fields, ""); } - fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildAnd(lhs, rhs, ""); + return self.wip.bin(.@"and", lhs, rhs, ""); } - fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildOr(lhs, rhs, ""); + return self.wip.bin(.@"or", lhs, rhs, ""); } - fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airXor(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildXor(lhs, rhs, ""); + return self.wip.bin(.xor, lhs, rhs, ""); } - fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8080,39 +8051,29 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); + return self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .@"shl nsw" else - rhs; - if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); - return self.builder.buildNUWShl(lhs, casted_rhs, ""); + .@"shl nuw", lhs, casted_rhs, ""); } - fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_type = self.typeOf(bin_op.lhs); - const rhs_type = self.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_type.scalarType(mod); - const rhs_scalar_ty = rhs_type.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, (try o.lowerType(lhs_type)).toLlvm(&o.builder), "") - else - rhs; - 
return self.builder.buildShl(lhs, casted_rhs, ""); + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_type), ""); + return self.wip.bin(.shl, lhs, casted_rhs, ""); } - fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8121,42 +8082,36 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); const lhs_bits = lhs_scalar_ty.bitSize(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits) - self.builder.buildZExt(rhs, lhs.typeOf(), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); - const result = if (lhs_scalar_ty.isSignedInt(mod)) - self.builder.buildSShlSat(lhs, casted_rhs, "") + const result = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .@"llvm.sshl.sat." else - self.builder.buildUShlSat(lhs, casted_rhs, ""); + .@"llvm.ushl.sat.", lhs, casted_rhs, ""); // LLVM langref says "If b is (statically or dynamically) equal to or // larger than the integer bit width of the arguments, the result is a // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates. - const lhs_scalar_llvm_ty = try o.lowerType(lhs_scalar_ty); - const bits = (try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits)).toLlvm(&o.builder); - const lhs_max = (try o.builder.intConst(lhs_scalar_llvm_ty, -1)).toLlvm(&o.builder); - if (rhs_ty.zigTypeTag(mod) == .Vector) { - const vec_len = rhs_ty.vectorLen(mod); - const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); - const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, ""); - const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, ""); - return self.builder.buildSelect(in_range, result, lhs_max_vec, ""); - } else { - const in_range = self.builder.buildICmp(.ULT, rhs, bits, ""); - return self.builder.buildSelect(in_range, result, lhs_max, ""); - } + const lhs_llvm_ty = try o.lowerType(lhs_ty); + const lhs_scalar_llvm_ty = lhs_llvm_ty.scalarType(&o.builder); + const bits = try o.builder.splatValue( + lhs_llvm_ty, + try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits), + ); + const lhs_max = try o.builder.splatValue( + lhs_llvm_ty, + try o.builder.intConst(lhs_scalar_llvm_ty, -1), + ); + const in_range = try self.wip.icmp(.ult, rhs, bits, ""); + return self.wip.select(in_range, result, lhs_max, ""); } - fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value { + fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8165,63 +8120,41 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, (try o.lowerType(lhs_ty)).toLlvm(&o.builder), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), 
""); const is_signed_int = lhs_scalar_ty.isSignedInt(mod); - if (is_exact) { - if (is_signed_int) { - return self.builder.buildAShrExact(lhs, casted_rhs, ""); - } else { - return self.builder.buildLShrExact(lhs, casted_rhs, ""); - } - } else { - if (is_signed_int) { - return self.builder.buildAShr(lhs, casted_rhs, ""); - } else { - return self.builder.buildLShr(lhs, casted_rhs, ""); - } - } + return self.wip.bin(if (is_exact) + if (is_signed_int) .@"ashr exact" else .@"lshr exact" + else if (is_signed_int) .ashr else .lshr, lhs, casted_rhs, ""); } - fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.typeOfIndex(inst); - const dest_info = dest_ty.intInfo(mod); - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); + const dest_llvm_ty = try o.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(mod); - if (operand_info.bits < dest_info.bits) { - switch (operand_info.signedness) { - .signed => return self.builder.buildSExt(operand, dest_llvm_ty, ""), - .unsigned => return self.builder.buildZExt(operand, dest_llvm_ty, ""), - } - } else if (operand_info.bits > dest_info.bits) { - return self.builder.buildTrunc(operand, dest_llvm_ty, ""); - } else { - return operand; - } + return self.wip.conv(switch (operand_info.signedness) { + .signed => .signed, + .unsigned => .unsigned, + }, operand, dest_llvm_ty, ""); } - fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); - return self.builder.buildTrunc(operand, dest_llvm_ty, ""); + const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); + return self.wip.cast(.trunc, operand, dest_llvm_ty, ""); } - fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -8233,8 +8166,7 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); - return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); + return self.wip.cast(.fptrunc, operand, try o.lowerType(dest_ty), ""); } else { const operand_llvm_ty = try o.lowerType(operand_ty); const dest_llvm_ty = try o.lowerType(dest_ty); @@ -8243,14 +8175,21 @@ pub const FuncGen = struct { compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), }); - const params = [1]*llvm.Value{operand}; const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); - - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .C, .Auto, ""); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + ¶ms, 
+ params.len, + .C, + .Auto, + "", + ), &self.wip); } } - fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -8262,8 +8201,7 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = (try o.lowerType(dest_ty)).toLlvm(&o.builder); - return self.builder.buildFPExt(operand, dest_llvm_ty, ""); + return self.wip.cast(.fpext, operand, try o.lowerType(dest_ty), ""); } else { const operand_llvm_ty = try o.lowerType(operand_ty); const dest_llvm_ty = try o.lowerType(dest_ty); @@ -8272,24 +8210,31 @@ pub const FuncGen = struct { compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), }); - const params = [1]*llvm.Value{operand}; const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); - - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .C, .Auto, ""); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); } } - fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty); - const dest_llvm_ty = (try o.lowerType(self.typeOfIndex(inst))).toLlvm(&o.builder); - return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, ""); + const operand_ptr = try self.sliceOrArrayPtr(operand, ptr_ty); + const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); + return self.wip.cast(.ptrtoint, operand_ptr, dest_llvm_ty, ""); } - fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value { + fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const inst_ty = self.typeOfIndex(inst); @@ -8297,26 +8242,26 @@ pub const FuncGen = struct { return self.bitCast(operand, operand_ty, inst_ty); } - fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value { + fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { const o = self.dg.object; const mod = o.module; const operand_is_ref = isByRef(operand_ty, mod); const result_is_ref = isByRef(inst_ty, mod); - const llvm_dest_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); + const llvm_dest_ty = try o.lowerType(inst_ty); if (operand_is_ref and result_is_ref) { // They are both pointers, so just return the same opaque pointer :) return operand; } - if (llvm_dest_ty.getTypeKind() == .Integer and - operand.typeOf().getTypeKind() == .Integer) + if (llvm_dest_ty.isInteger(&o.builder) and + operand.typeOfWip(&self.wip).isInteger(&o.builder)) { - return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, ""); + return self.wip.conv(.unsigned, operand, llvm_dest_ty, ""); } if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) 
{ - return self.builder.buildIntToPtr(operand, llvm_dest_ty, ""); + return self.wip.cast(.inttoptr, operand, llvm_dest_ty, ""); } if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { @@ -8324,108 +8269,97 @@ pub const FuncGen = struct { if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const array_ptr = try self.buildAlloca(llvm_dest_ty, null); + const array_ptr = try self.buildAlloca(llvm_dest_ty, .default); const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { - const llvm_store = self.builder.buildStore(operand, array_ptr); - llvm_store.setAlignment(inst_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod)); + _ = try self.wip.store(.normal, operand, array_ptr, alignment); } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. const llvm_usize = try o.lowerType(Type.usize); - const zero = try o.builder.intConst(llvm_usize, 0); + const usize_zero = try o.builder.intValue(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = try o.builder.intConst(llvm_usize, i); - const index_u32 = try o.builder.intConst(.i32, i); - const indexes: [2]*llvm.Value = .{ - zero.toLlvm(&o.builder), - index_usize.toLlvm(&o.builder), - }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_dest_ty, array_ptr, &indexes, indexes.len, ""); - const elem = self.builder.buildExtractElement(operand, index_u32.toLlvm(&o.builder), ""); - _ = self.builder.buildStore(elem, elem_ptr); + const elem_ptr = try self.wip.gep(.inbounds, llvm_dest_ty, array_ptr, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); + const elem = + try self.wip.extractElement(operand, try o.builder.intValue(.i32, i), ""); + _ = try self.wip.store(.normal, elem, elem_ptr, .default); } } return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(mod); - const llvm_vector_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); - if (!operand_is_ref) { - return self.dg.todo("implement bitcast non-ref array to vector", .{}); - } + const llvm_vector_ty = try o.lowerType(inst_ty); + if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { - const vector = self.builder.buildLoad(llvm_vector_ty, operand, ""); // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - vector.setAlignment(elem_ty.abiAlignment(mod)); - return vector; + const alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.wip.load(.normal, llvm_vector_ty, operand, alignment, ""); } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. 
- const array_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); + const array_llvm_ty = try o.lowerType(operand_ty); + const elem_llvm_ty = try o.lowerType(elem_ty); const llvm_usize = try o.lowerType(Type.usize); - const zero = try o.builder.intConst(llvm_usize, 0); + const usize_zero = try o.builder.intValue(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); - var vector = llvm_vector_ty.getUndef(); + var vector = try o.builder.poisonValue(llvm_vector_ty); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = try o.builder.intConst(llvm_usize, i); - const index_u32 = try o.builder.intConst(.i32, i); - const indexes: [2]*llvm.Value = .{ - zero.toLlvm(&o.builder), - index_usize.toLlvm(&o.builder), - }; - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indexes, indexes.len, ""); - const elem = self.builder.buildLoad(elem_llvm_ty, elem_ptr, ""); - vector = self.builder.buildInsertElement(vector, elem, index_u32.toLlvm(&o.builder), ""); + const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, operand, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); + const elem = try self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, ""); + vector = + try self.wip.insertElement(vector, elem, try o.builder.intValue(.i32, i), ""); } - return vector; } } if (operand_is_ref) { - const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, ""); - load_inst.setAlignment(operand_ty.abiAlignment(mod)); - return load_inst; + const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod)); + return self.wip.load(.normal, llvm_dest_ty, operand, alignment, ""); } if (result_is_ref) { - const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits( + @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)), + ); const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); - const store_inst = self.builder.buildStore(operand, result_ptr); - store_inst.setAlignment(alignment); + _ = try self.wip.store(.normal, operand, result_ptr, alignment); return result_ptr; } - if (llvm_dest_ty.getTypeKind() == .Struct) { + if (llvm_dest_ty.isStruct(&o.builder)) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values. // Therefore, we store operand to alloca, then load for result. 
- const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits( + @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)), + ); const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); - const store_inst = self.builder.buildStore(operand, result_ptr); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, ""); - load_inst.setAlignment(alignment); - return load_inst; + _ = try self.wip.store(.normal, operand, result_ptr, alignment); + return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, ""); } - return self.builder.buildBitCast(operand, llvm_dest_ty, ""); + return self.wip.cast(.bitcast, operand, llvm_dest_ty, ""); } - fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); return operand; } - fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const arg_val = self.args[self.arg_index]; @@ -8433,9 +8367,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); if (o.di_builder) |dib| { - if (needDbgVarWorkaround(o)) { - return arg_val; - } + if (needDbgVarWorkaround(o)) return arg_val; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const func_index = self.dg.decl.getOwnedFunctionIndex(); @@ -8450,62 +8382,64 @@ pub const FuncGen = struct { try o.lowerDebugType(inst_ty, .full), true, // always preserve 0, // flags - self.arg_index, // includes +1 because 0 is return type + @intCast(self.arg_index), // includes +1 because 0 is return type ); const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null); - const insert_block = self.builder.getInsertBlock(); + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); if (isByRef(inst_ty, mod)) { - _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block); + _ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(mod); - const alloca = try self.buildAlloca(arg_val.typeOf(), alignment); - const store_inst = self.builder.buildStore(arg_val, alloca); - store_inst.setAlignment(alignment); - _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); + const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod)); + const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, arg_val, alloca, alignment); + _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else { - _ = dib.insertDbgValueIntrinsicAtEnd(arg_val, di_local_var, debug_loc, insert_block); + _ = dib.insertDbgValueIntrinsicAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } } return arg_val; } - fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) - return (try 
o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); + return (try o.lowerPtrToVoid(ptr_ty)).toValue(); - const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); - const alignment = ptr_ty.ptrAlignment(mod); + const pointee_llvm_ty = try o.lowerType(pointee_type); + const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); return self.buildAlloca(pointee_llvm_ty, alignment); } - fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) - return (try o.lowerPtrToVoid(ptr_ty)).toLlvm(&o.builder); - if (self.ret_ptr) |ret_ptr| return ret_ptr; - const ret_llvm_ty = (try o.lowerType(ret_ty)).toLlvm(&o.builder); - return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); + return (try o.lowerPtrToVoid(ptr_ty)).toValue(); + if (self.ret_ptr != .none) return self.ret_ptr; + const ret_llvm_ty = try o.lowerType(ret_ty); + const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + return self.buildAlloca(ret_llvm_ty, alignment); } /// Use this instead of builder.buildAlloca, because this function makes sure to /// put the alloca instruction at the top of the function! - fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) Allocator.Error!*llvm.Value { - const o = self.dg.object; - const mod = o.module; - const target = mod.getTarget(); - return o.buildAllocaInner(&self.wip, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); + fn buildAlloca( + self: *FuncGen, + llvm_ty: Builder.Type, + alignment: Builder.Alignment, + ) Allocator.Error!Builder.Value { + const target = self.dg.object.module.getTarget(); + return buildAllocaInner(&self.wip, self.di_scope != null, llvm_ty, alignment, target); } - fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8519,23 +8453,29 @@ pub const FuncGen = struct { // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. 
const fill_byte = if (safety) - (try o.builder.intConst(.i8, 0xaa)).toLlvm(&o.builder) + try o.builder.intConst(.i8, 0xaa) else - Builder.Type.i8.toLlvm(&o.builder).getUndef(); + try o.builder.undefConst(.i8); const operand_size = operand_ty.abiSize(mod); const usize_ty = try o.lowerType(Type.usize); - const len = (try o.builder.intConst(usize_ty, operand_size)).toLlvm(&o.builder); - const dest_ptr_align = ptr_ty.ptrAlignment(mod); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); + const len = try o.builder.intValue(usize_ty, operand_size); + const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&o.builder), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + ptr_ty.isVolatilePtr(mod), + ), &self.wip); if (safety and mod.comp.bin_file.options.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } - return null; + return .none; } const src_operand = try self.resolveInst(bin_op.rhs); - try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic); - return null; + try self.store(dest_ptr, ptr_ty, src_operand, .none); + return .none; } /// As an optimization, we want to avoid unnecessary copies of isByRef=true @@ -8560,7 +8500,7 @@ pub const FuncGen = struct { return false; } - fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = fg.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -8577,22 +8517,40 @@ pub const FuncGen = struct { return fg.load(ptr, ptr_ty); } - fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; + const o = self.dg.object; const llvm_fn = try self.getIntrinsic("llvm.trap", &.{}); - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, ""); - _ = self.builder.buildUnreachable(); - return null; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + undefined, + 0, + .Cold, + .Auto, + "", + ), &self.wip); + _ = try self.wip.@"unreachable"(); + return .none; } - fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; + const o = self.dg.object; const llvm_fn = try self.getIntrinsic("llvm.debugtrap", &.{}); - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .C, .Auto, ""); - return null; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + undefined, + 0, + .C, + .Auto, + "", + ), &self.wip); + return .none; } - fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; const mod = o.module; @@ -8600,18 +8558,26 @@ pub const FuncGen = struct { const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 - return (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder); + return o.builder.intValue(llvm_usize, 0); } const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{}); const 
params = [_]*llvm.Value{ (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), }; - const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); - return self.builder.buildPtrToInt(ptr_val, llvm_usize.toLlvm(&o.builder), ""); + const ptr_val = (try self.wip.unimplemented(.ptr, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.ptr, &.{.i32}, .normal)).toLlvm(&o.builder), + llvm_fn, + &params, + params.len, + .Fast, + .Auto, + "", + ), &self.wip); + return self.wip.cast(.ptrtoint, ptr_val, llvm_usize, ""); } - fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; const llvm_fn_name = "llvm.frameaddress.p0"; @@ -8619,24 +8585,34 @@ pub const FuncGen = struct { const fn_type = try o.builder.fnType(.ptr, &.{.i32}, .normal); break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; + const llvm_fn_ty = try o.builder.fnType(.ptr, &.{.i32}, .normal); const params = [_]*llvm.Value{ (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), }; - const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); - const llvm_usize = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); + const ptr_val = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &params, + params.len, + .Fast, + .Auto, + "", + ), + &self.wip, + ); + return self.wip.cast(.ptrtoint, ptr_val, try o.lowerType(Type.usize), ""); } - fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFence(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const atomic_order = self.air.instructions.items(.data)[inst].fence; - const llvm_memory_order = toLlvmAtomicOrdering(atomic_order); - const single_threaded = llvm.Bool.fromBool(self.single_threaded); - _ = self.builder.buildFence(llvm_memory_order, single_threaded, ""); - return null; + const ordering = toLlvmAtomicOrdering(atomic_order); + _ = try self.wip.fence(self.sync_scope, ordering); + return .none; } - fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value { + fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -8645,47 +8621,51 @@ pub const FuncGen = struct { var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); const operand_ty = self.typeOf(extra.ptr).childType(mod); - const abi_ty = try o.getAtomicAbiType(operand_ty, false); - if (abi_ty != .none) { - const llvm_abi_ty = abi_ty.toLlvm(&o.builder); + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); + if (llvm_abi_ty != .none) { // operand needs widening and truncating - if (operand_ty.isSignedInt(mod)) { - expected_value = self.builder.buildSExt(expected_value, llvm_abi_ty, ""); - new_value = self.builder.buildSExt(new_value, llvm_abi_ty, ""); - } else { - expected_value = self.builder.buildZExt(expected_value, llvm_abi_ty, ""); - new_value = self.builder.buildZExt(new_value, llvm_abi_ty, ""); - } + const signedness: Builder.Function.Instruction.Cast.Signedness = + if (operand_ty.isSignedInt(mod))
.signed else .unsigned; + expected_value = try self.wip.conv(signedness, expected_value, llvm_abi_ty, ""); + new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, ""); } - const result = self.builder.buildAtomicCmpXchg( - ptr, - expected_value, - new_value, - toLlvmAtomicOrdering(extra.successOrder()), - toLlvmAtomicOrdering(extra.failureOrder()), - llvm.Bool.fromBool(self.single_threaded), + + const llvm_result_ty = try o.builder.structType(.normal, &.{ + if (llvm_abi_ty != .none) llvm_abi_ty else llvm_operand_ty, + .i1, + }); + const result = (try self.wip.unimplemented(llvm_result_ty, "")).finish( + self.builder.buildAtomicCmpXchg( + ptr.toLlvm(&self.wip), + expected_value.toLlvm(&self.wip), + new_value.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.successOrder()))), + @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.failureOrder()))), + llvm.Bool.fromBool(self.sync_scope == .singlethread), + ), + &self.wip, ); - result.setWeak(llvm.Bool.fromBool(is_weak)); + result.toLlvm(&self.wip).setWeak(llvm.Bool.fromBool(is_weak)); const optional_ty = self.typeOfIndex(inst); - var payload = self.builder.buildExtractValue(result, 0, ""); - if (abi_ty != .none) { - payload = self.builder.buildTrunc(payload, (try o.lowerType(operand_ty)).toLlvm(&o.builder), ""); - } - const success_bit = self.builder.buildExtractValue(result, 1, ""); + var payload = try self.wip.extractValue(result, &.{0}, ""); + if (llvm_abi_ty != .none) payload = try self.wip.cast(.trunc, payload, llvm_operand_ty, ""); + const success_bit = try self.wip.extractValue(result, &.{1}, ""); if (optional_ty.optionalReprIsPayload(mod)) { - return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); + const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip)); + return self.wip.select(success_bit, zero, payload, ""); } comptime assert(optional_layout_version == 3); - const non_null_bit = self.builder.buildNot(success_bit, ""); + const non_null_bit = try self.wip.not(success_bit, ""); return buildOptional(self, optional_ty, payload, non_null_bit); } - fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; @@ -8698,121 +8678,146 @@ pub const FuncGen = struct { const is_float = operand_ty.isRuntimeFloat(); const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); - const single_threaded = llvm.Bool.fromBool(self.single_threaded); - const abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg); - if (abi_ty != .none) { - const llvm_abi_ty = abi_ty.toLlvm(&o.builder); + const single_threaded = llvm.Bool.fromBool(self.sync_scope == .singlethread); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg); + const llvm_operand_ty = try o.lowerType(operand_ty); + if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. 
- const casted_operand = if (is_float) - self.builder.buildBitCast(operand, llvm_abi_ty, "") - else if (is_signed_int) - self.builder.buildSExt(operand, llvm_abi_ty, "") - else - self.builder.buildZExt(operand, llvm_abi_ty, ""); + const casted_operand = try self.wip.cast( + if (is_float) .bitcast else if (is_signed_int) .sext else .zext, + @enumFromInt(@intFromEnum(operand)), + llvm_abi_ty, + "", + ); - const uncasted_result = self.builder.buildAtomicRmw( - op, - ptr, - casted_operand, - ordering, - single_threaded, + const uncasted_result = (try self.wip.unimplemented(llvm_abi_ty, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + casted_operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, ); - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); + if (is_float) { - return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.bitcast, uncasted_result, llvm_operand_ty, ""); } else { - return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.trunc, uncasted_result, llvm_operand_ty, ""); } } - if (operand.typeOf().getTypeKind() != .Pointer) { - return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded); + if (!llvm_operand_ty.isPointer(&o.builder)) { + return (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, + ); } // It's a pointer but we need to treat it as an int. - const usize_llvm_ty = (try o.lowerType(Type.usize)).toLlvm(&o.builder); - const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); - const uncasted_result = self.builder.buildAtomicRmw( - op, - ptr, - casted_operand, - ordering, - single_threaded, + const llvm_usize = try o.lowerType(Type.usize); + const casted_operand = try self.wip.cast(.ptrtoint, operand, llvm_usize, ""); + const uncasted_result = (try self.wip.unimplemented(llvm_usize, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + casted_operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, ); - const operand_llvm_ty = (try o.lowerType(operand_ty)).toLlvm(&o.builder); - return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.inttoptr, uncasted_result, llvm_operand_ty, ""); } - fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_info.child.toType(); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) - return null; + const info = ptr_ty.ptrInfo(mod); + const elem_ty = info.child.toType(); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; const ordering = toLlvmAtomicOrdering(atomic_load.order); - const abi_ty = try o.getAtomicAbiType(elem_ty, false); - const ptr_alignment: u32 = @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse - ptr_info.child.toType().abiAlignment(mod)); - const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile); - const elem_llvm_ty = (try 
o.lowerType(elem_ty)).toLlvm(&o.builder); - - if (abi_ty != .none) { - const llvm_abi_ty = abi_ty.toLlvm(&o.builder); + const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false); + const ptr_alignment = Builder.Alignment.fromByteUnits( + info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod), + ); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + true => .@"volatile", + }; + const elem_llvm_ty = try o.lowerType(elem_ty); + + if (llvm_abi_ty != .none) { // operand needs widening and truncating - const load_inst = self.builder.buildLoad(llvm_abi_ty, ptr, ""); - load_inst.setAlignment(ptr_alignment); - load_inst.setVolatile(ptr_volatile); - load_inst.setOrdering(ordering); - return self.builder.buildTrunc(load_inst, elem_llvm_ty, ""); + const loaded = try self.wip.loadAtomic( + ptr_kind, + llvm_abi_ty, + ptr, + self.sync_scope, + ordering, + ptr_alignment, + "", + ); + return self.wip.cast(.trunc, loaded, elem_llvm_ty, ""); } - const load_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); - load_inst.setAlignment(ptr_alignment); - load_inst.setVolatile(ptr_volatile); - load_inst.setOrdering(ordering); - return load_inst; + return self.wip.loadAtomic( + ptr_kind, + elem_llvm_ty, + ptr, + self.sync_scope, + ordering, + ptr_alignment, + "", + ); } fn airAtomicStore( self: *FuncGen, inst: Air.Inst.Index, - ordering: llvm.AtomicOrdering, - ) !?*llvm.Value { + ordering: Builder.AtomicOrdering, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); - const abi_ty = try o.getAtomicAbiType(operand_ty, false); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); - if (abi_ty != .none) { - const llvm_abi_ty = abi_ty.toLlvm(&o.builder); + if (llvm_abi_ty != .none) { // operand needs widening - if (operand_ty.isSignedInt(mod)) { - element = self.builder.buildSExt(element, llvm_abi_ty, ""); - } else { - element = self.builder.buildZExt(element, llvm_abi_ty, ""); - } + element = try self.wip.conv( + if (operand_ty.isSignedInt(mod)) .signed else .unsigned, + element, + llvm_abi_ty, + "", + ); } try self.store(ptr, ptr_ty, element, ordering); - return null; + return .none; } - fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const dest_ptr_align = ptr_ty.ptrAlignment(mod); - const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); + const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); // Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless @@ -8829,20 +8834,26 @@ pub const FuncGen = struct { // extra information to LLVM. 
However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. const fill_byte = if (safety) - (try o.builder.intConst(.i8, 0xaa)).toLlvm(&o.builder) + try o.builder.intValue(.i8, 0xaa) else - Builder.Type.i8.toLlvm(&o.builder).getUndef(); + try o.builder.undefValue(.i8); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } if (safety and mod.comp.bin_file.options.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } - return null; + return .none; } // Test if the element value is compile-time known to be a @@ -8850,18 +8861,21 @@ pub const FuncGen = struct { // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { - const fill_byte = try self.resolveValue(.{ - .ty = Type.u8, - .val = byte_val, - }); + const fill_byte = try self.resolveValue(.{ .ty = Type.u8, .val = byte_val }); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte.toLlvm(&o.builder), len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte.toValue(), len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte.toLlvm(&o.builder), len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&o.builder), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } - return null; + return .none; } } @@ -8876,9 +8890,15 @@ pub const FuncGen = struct { if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } - return null; + return .none; } // non-byte-sized element. lower with a loop. something like this: @@ -8886,96 +8906,92 @@ pub const FuncGen = struct { // entry: // ... // %end_ptr = getelementptr %ptr, %len - // br loop + // br %loop // loop: // %it_ptr = phi body %next_ptr, entry %ptr // %end = cmp eq %it_ptr, %end_ptr - // cond_br %end body, end + // br %end, %body, %end // body: // store %it_ptr, %value // %next_ptr = getelementptr %it_ptr, 1 - // br loop + // br %loop // end: // ... 
- const entry_block = self.builder.getInsertBlock(); - const loop_block = try self.wip.block("InlineMemsetLoop"); - const body_block = try self.wip.block("InlineMemsetBody"); - const end_block = try self.wip.block("InlineMemsetEnd"); + const entry_block = self.wip.cursor.block; + const loop_block = try self.wip.block(2, "InlineMemsetLoop"); + const body_block = try self.wip.block(1, "InlineMemsetBody"); + const end_block = try self.wip.block(1, "InlineMemsetEnd"); const usize_ty = try o.lowerType(Type.usize); const len = switch (ptr_ty.ptrSize(mod)) { - .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => (try o.builder.intConst(usize_ty, ptr_ty.childType(mod).arrayLen(mod))).toLlvm(&o.builder), + .Slice => try self.wip.extractValue(dest_slice, &.{1}, ""), + .One => try o.builder.intValue(usize_ty, ptr_ty.childType(mod).arrayLen(mod)), .Many, .C => unreachable, }; - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - const len_gep = [_]*llvm.Value{len}; - const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); - _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + const elem_llvm_ty = try o.lowerType(elem_ty); + const end_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, dest_ptr, &.{len}, ""); + _ = try self.wip.br(loop_block); self.wip.cursor = .{ .block = loop_block }; - self.builder.positionBuilderAtEnd(loop_block.toLlvm(&self.wip)); - const it_ptr = self.builder.buildPhi(Builder.Type.ptr.toLlvm(&o.builder), ""); - const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, ""); - _ = self.builder.buildCondBr(end, body_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + const it_ptr = try self.wip.phi(.ptr, ""); + const end = try self.wip.icmp(.ne, it_ptr.toValue(), end_ptr, ""); + _ = try self.wip.brCond(end, body_block, end_block); self.wip.cursor = .{ .block = body_block }; - self.builder.positionBuilderAtEnd(body_block.toLlvm(&self.wip)); const elem_abi_alignment = elem_ty.abiAlignment(mod); - const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); + const it_ptr_alignment = Builder.Alignment.fromByteUnits( + @min(elem_abi_alignment, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)), + ); if (isByRef(elem_ty, mod)) { - _ = self.builder.buildMemCpy( - it_ptr, - it_ptr_alignment, - value, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + it_ptr.toValue().toLlvm(&self.wip), + @intCast(it_ptr_alignment.toByteUnits() orelse 0), + value.toLlvm(&self.wip), elem_abi_alignment, (try o.builder.intConst(usize_ty, elem_abi_size)).toLlvm(&o.builder), is_volatile, - ); - } else { - const store_inst = self.builder.buildStore(value, it_ptr); - store_inst.setAlignment(it_ptr_alignment); - store_inst.setVolatile(llvm.Bool.fromBool(is_volatile)); - } - const one_gep = [_]*llvm.Value{ - (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), - }; - const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, ""); - _ = self.builder.buildBr(loop_block.toLlvm(&self.wip)); + ), &self.wip); + } else _ = try self.wip.store(switch (is_volatile) { + false => .normal, + true => .@"volatile", + }, value, it_ptr.toValue(), it_ptr_alignment); + const next_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, it_ptr.toValue(), &.{ + try o.builder.intValue(usize_ty, 1), + }, ""); + _ = try self.wip.br(loop_block); self.wip.cursor = .{ .block = end_block }; - self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); - - const incoming_values: 
[2]*llvm.Value = .{ next_ptr, dest_ptr }; - const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block.toLlvm(&self.wip), entry_block }; - it_ptr.addIncoming(&incoming_values, &incoming_blocks, 2); - - return null; + try it_ptr.finish(&.{ next_ptr, dest_ptr }, &.{ body_block, entry_block }, &self.wip); + return .none; } fn safeWasmMemset( self: *FuncGen, - dest_ptr: *llvm.Value, - fill_byte: *llvm.Value, - len: *llvm.Value, - dest_ptr_align: u32, + dest_ptr: Builder.Value, + fill_byte: Builder.Value, + len: Builder.Value, + dest_ptr_align: Builder.Alignment, is_volatile: bool, ) !void { const o = self.dg.object; const llvm_usize_ty = try o.lowerType(Type.usize); - const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); - const memset_block = try self.wip.block("MemsetTrapSkip"); - const end_block = try self.wip.block("MemsetTrapEnd"); - _ = self.builder.buildCondBr(cond, memset_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + const cond = try self.cmp(len, try o.builder.intValue(llvm_usize_ty, 0), Type.usize, .neq); + const memset_block = try self.wip.block(1, "MemsetTrapSkip"); + const end_block = try self.wip.block(2, "MemsetTrapEnd"); + _ = try self.wip.brCond(cond, memset_block, end_block); self.wip.cursor = .{ .block = memset_block }; - self.builder.positionBuilderAtEnd(memset_block.toLlvm(&self.wip)); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); - _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); + _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = end_block }; - self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); } - fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8983,9 +8999,9 @@ pub const FuncGen = struct { const dest_ptr_ty = self.typeOf(bin_op.lhs); const src_slice = try self.resolveInst(bin_op.rhs); const src_ptr_ty = self.typeOf(bin_op.rhs); - const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); + const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty); const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); - const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); + const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); // When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction. 
@@ -8997,86 +9013,81 @@ pub const FuncGen = struct { std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and dest_ptr_ty.isSlice(mod)) { - const llvm_usize_ty = try o.lowerType(Type.usize); - const cond = try self.cmp(len, (try o.builder.intConst(llvm_usize_ty, 0)).toLlvm(&o.builder), Type.usize, .neq); - const memcpy_block = try self.wip.block("MemcpyTrapSkip"); - const end_block = try self.wip.block("MemcpyTrapEnd"); - _ = self.builder.buildCondBr(cond, memcpy_block.toLlvm(&self.wip), end_block.toLlvm(&self.wip)); + const zero_usize = try o.builder.intValue(try o.lowerType(Type.usize), 0); + const cond = try self.cmp(len, zero_usize, Type.usize, .neq); + const memcpy_block = try self.wip.block(1, "MemcpyTrapSkip"); + const end_block = try self.wip.block(2, "MemcpyTrapEnd"); + _ = try self.wip.brCond(cond, memcpy_block, end_block); self.wip.cursor = .{ .block = memcpy_block }; - self.builder.positionBuilderAtEnd(memcpy_block.toLlvm(&self.wip)); - _ = self.builder.buildMemCpy( - dest_ptr, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + dest_ptr.toLlvm(&self.wip), dest_ptr_ty.ptrAlignment(mod), - src_ptr, + src_ptr.toLlvm(&self.wip), src_ptr_ty.ptrAlignment(mod), - len, + len.toLlvm(&self.wip), is_volatile, - ); - _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + ), &self.wip); + _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = end_block }; - self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); - return null; + return .none; } - _ = self.builder.buildMemCpy( - dest_ptr, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + dest_ptr.toLlvm(&self.wip), dest_ptr_ty.ptrAlignment(mod), - src_ptr, + src_ptr.toLlvm(&self.wip), src_ptr_ty.ptrAlignment(mod), - len, + len.toLlvm(&self.wip), is_volatile, - ); - return null; + ), &self.wip); + return .none; } - fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const un_ty = self.typeOf(bin_op.lhs).childType(mod); const layout = un_ty.unionGetLayout(mod); - if (layout.tag_size == 0) return null; + if (layout.tag_size == 0) return .none; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); if (layout.payload_size == 0) { // TODO alignment on this store - _ = self.builder.buildStore(new_tag, union_ptr); - return null; + _ = try self.wip.store(.normal, new_tag, union_ptr, .default); + return .none; } - const un_llvm_ty = (try o.lowerType(un_ty)).toLlvm(&o.builder); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - const tag_field_ptr = self.builder.buildStructGEP(un_llvm_ty, union_ptr, tag_index, ""); + const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(un_ty), union_ptr, tag_index, ""); // TODO alignment on this store - _ = self.builder.buildStore(new_tag, tag_field_ptr); - return null; + _ = try self.wip.store(.normal, new_tag, tag_field_ptr, .default); + return .none; } - fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.typeOf(ty_op.operand); const layout = un_ty.unionGetLayout(mod); - if (layout.tag_size == 0) return null; + if 
(layout.tag_size == 0) return .none; const union_handle = try self.resolveInst(ty_op.operand); if (isByRef(un_ty, mod)) { - const llvm_un_ty = (try o.lowerType(un_ty)).toLlvm(&o.builder); - if (layout.payload_size == 0) { - return self.builder.buildLoad(llvm_un_ty, union_handle, ""); - } + const llvm_un_ty = try o.lowerType(un_ty); + if (layout.payload_size == 0) + return self.wip.load(.normal, llvm_un_ty, union_handle, .default, ""); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - const tag_field_ptr = self.builder.buildStructGEP(llvm_un_ty, union_handle, tag_index, ""); - return self.builder.buildLoad(llvm_un_ty.structGetTypeAtIndex(tag_index), tag_field_ptr, ""); + const tag_field_ptr = try self.wip.gepStruct(llvm_un_ty, union_handle, tag_index, ""); + const llvm_tag_ty = llvm_un_ty.structFields(&o.builder)[tag_index]; + return self.wip.load(.normal, llvm_tag_ty, tag_field_ptr, .default, ""); } else { - if (layout.payload_size == 0) { - return union_handle; - } + if (layout.payload_size == 0) return union_handle; const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - return self.builder.buildExtractValue(union_handle, tag_index, ""); + return self.wip.extractValue(union_handle, &.{tag_index}, ""); } } - fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value { + fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -9084,7 +9095,7 @@ pub const FuncGen = struct { return self.buildFloatOp(op, operand_ty, 1, .{operand}); } - fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -9094,57 +9105,64 @@ pub const FuncGen = struct { return self.buildFloatOp(.neg, operand_ty, 1, .{operand}); } - fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const fn_val = try self.getIntrinsic(llvm_fn_name, &.{try o.lowerType(operand_ty)}); + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{ llvm_operand_ty, .i1 }, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); - const params = [_]*llvm.Value{ operand, Builder.Constant.false.toLlvm(&o.builder) }; - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); + const params = [_]*llvm.Value{ + operand.toLlvm(&self.wip), + Builder.Constant.false.toLlvm(&o.builder), + }; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); - - const bits = operand_ty.intInfo(mod).bits; - const result_bits =
result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const params = [_]*llvm.Value{operand}; - const fn_val = try self.getIntrinsic(llvm_fn_name, &.{try o.lowerType(operand_ty)}); - - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); + + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); - - const bits = operand_ty.intInfo(mod).bits; - const result_bits = result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -9153,7 +9171,7 @@ pub const FuncGen = struct { assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try o.lowerType(operand_ty); + var llvm_operand_ty = try o.lowerType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte @@ -9161,35 +9179,39 @@ pub const FuncGen = struct { const scalar_ty = try o.builder.intType(@intCast(bits + 8)); if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(mod); - operand_llvm_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); - } else operand_llvm_ty = scalar_ty; + llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); + } else llvm_operand_ty = scalar_ty; const shift_amt = - try o.builder.splatConst(operand_llvm_ty, try o.builder.intConst(scalar_ty, 8)); - const extended = self.builder.buildZExt(operand, operand_llvm_ty.toLlvm(&o.builder), ""); - operand = self.builder.buildShl(extended, shift_amt.toLlvm(&o.builder), ""); + try o.builder.splatValue(llvm_operand_ty, try o.builder.intConst(scalar_ty, 8)); + const extended = try self.wip.cast(.zext, operand,
llvm_operand_ty, ""); + operand = try self.wip.bin(.shl, extended, shift_amt, ""); bits = bits + 8; } - const params = [_]*llvm.Value{operand}; - const fn_val = try self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); - - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); + + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + ¶ms, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); - const result_bits = result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -9197,58 +9219,60 @@ pub const FuncGen = struct { const error_set_ty = self.air.getRefType(ty_op.ty); const names = error_set_ty.errorSetNames(mod); - const valid_block = try self.wip.block("Valid"); - const invalid_block = try self.wip.block("Invalid"); - const end_block = try self.wip.block("End"); - const switch_instr = self.builder.buildSwitch(operand, invalid_block.toLlvm(&self.wip), @intCast(names.len)); + const valid_block = try self.wip.block(@intCast(names.len), "Valid"); + const invalid_block = try self.wip.block(1, "Invalid"); + const end_block = try self.wip.block(2, "End"); + var wip_switch = try self.wip.@"switch"(operand, invalid_block, @intCast(names.len)); + defer wip_switch.finish(&self.wip); for (names) |name| { const err_int = mod.global_error_set.getIndex(name).?; - const this_tag_int_value = - try o.lowerValue((try mod.intValue(Type.err_int, err_int)).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), valid_block.toLlvm(&self.wip)); + const this_tag_int_value = try o.builder.intConst(Builder.Type.err_int, err_int); + try wip_switch.addCase(this_tag_int_value, valid_block, &self.wip); } self.wip.cursor = .{ .block = valid_block }; - self.builder.positionBuilderAtEnd(valid_block.toLlvm(&self.wip)); - _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = invalid_block }; - self.builder.positionBuilderAtEnd(invalid_block.toLlvm(&self.wip)); - _ = self.builder.buildBr(end_block.toLlvm(&self.wip)); + _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = end_block }; - self.builder.positionBuilderAtEnd(end_block.toLlvm(&self.wip)); - - const incoming_values: [2]*llvm.Value = .{ - Builder.Constant.true.toLlvm(&o.builder), - Builder.Constant.false.toLlvm(&o.builder), - }; - const incoming_blocks: [2]*llvm.BasicBlock = .{ - valid_block.toLlvm(&self.wip), invalid_block.toLlvm(&self.wip), - }; - const phi_node = 
self.builder.buildPhi(Builder.Type.i1.toLlvm(&o.builder), ""); - phi_node.addIncoming(&incoming_values, &incoming_blocks, 2); - return phi_node; + const phi = try self.wip.phi(.i1, ""); + try phi.finish( + &.{ Builder.Constant.true.toValue(), Builder.Constant.false.toValue() }, + &.{ valid_block, invalid_block }, + &self.wip, + ); + return phi.toValue(); } - fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty); - const params = [_]*llvm.Value{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { + fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; const mod = o.module; const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl); - if (gop.found_existing) return gop.value_ptr.toLlvm(&o.builder); + if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); @@ -9256,9 +9280,9 @@ pub const FuncGen = struct { fqn.fmt(&mod.intern_pool), }); - const fn_type = try o.builder.fnType(.i1, &.{try o.lowerType( - enum_type.tag_ty.toType(), - )}, .normal); + const fn_type = try o.builder.fnType(.i1, &.{ + try o.lowerType(enum_type.tag_ty.toType()), + }, .normal); const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); @@ -9277,63 +9301,63 @@ pub const FuncGen = struct { try o.builder.functions.append(self.gpa, function); gop.value_ptr.* = global.kind.function; - const prev_block = self.builder.getInsertBlock(); - const prev_debug_location = self.builder.getCurrentDebugLocation2(); - defer { - self.builder.positionBuilderAtEnd(prev_block); - if (self.di_scope != null) { - self.builder.setCurrentDebugLocation2(prev_debug_location); - } - } - - var wip = Builder.WipFunction.init(&o.builder, global.kind.function); + var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); defer wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; - const entry_block = try wip.block("Entry"); - wip.cursor = .{ .block = entry_block }; - self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); - self.builder.clearCurrentDebugLocation(); - - const named_block = try wip.block("Named"); - const unnamed_block = try wip.block("Unnamed"); - const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block.toLlvm(&wip), @intCast(enum_type.names.len)); + const named_block = try wip.block(@intCast(enum_type.names.len), "Named"); + const unnamed_block = try
wip.block(1, "Unnamed"); + const tag_int_value = wip.arg(0); + var wip_switch = try wip.@"switch"(tag_int_value, unnamed_block, @intCast(enum_type.names.len)); + defer wip_switch.finish(&wip); for (0..enum_type.names.len) |field_index| { - const this_tag_int_value = - try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), named_block.toLlvm(&wip)); + const this_tag_int_value = try o.lowerValue( + (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + ); + try wip_switch.addCase(this_tag_int_value, named_block, &wip); } wip.cursor = .{ .block = named_block }; - self.builder.positionBuilderAtEnd(named_block.toLlvm(&wip)); - _ = self.builder.buildRet(Builder.Constant.true.toLlvm(&o.builder)); + _ = try wip.ret(Builder.Constant.true.toValue()); wip.cursor = .{ .block = unnamed_block }; - self.builder.positionBuilderAtEnd(unnamed_block.toLlvm(&wip)); - _ = self.builder.buildRet(Builder.Constant.false.toLlvm(&o.builder)); + _ = try wip.ret(Builder.Constant.false.toValue()); try wip.finish(); - return fn_val; + return global.kind.function; } - fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getEnumTagNameFunction(enum_ty); - const params = [_]*llvm.Value{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); + const llvm_fn_ty = llvm_fn.typeOf(&o.builder); + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .Fast, + .Auto, + "", + ), + &self.wip, + ); } - fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { + fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; const mod = o.module; const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. 
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl); - if (gop.found_existing) return gop.value_ptr.toLlvm(&o.builder); + if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function; errdefer assert(o.decl_map.remove(enum_type.decl)); const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); @@ -9362,26 +9386,15 @@ pub const FuncGen = struct { gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); try o.builder.functions.append(self.gpa, function); - const prev_block = self.builder.getInsertBlock(); - const prev_debug_location = self.builder.getCurrentDebugLocation2(); - defer { - self.builder.positionBuilderAtEnd(prev_block); - if (self.di_scope != null) { - self.builder.setCurrentDebugLocation2(prev_debug_location); - } - } - - var wip = Builder.WipFunction.init(&o.builder, global.kind.function); + var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); defer wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; - const entry_block = try wip.block("Entry"); - wip.cursor = .{ .block = entry_block }; - self.builder.positionBuilderAtEnd(entry_block.toLlvm(&wip)); - self.builder.clearCurrentDebugLocation(); - - const bad_value_block = try wip.block("BadValue"); - const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block.toLlvm(&wip), @intCast(enum_type.names.len)); + const bad_value_block = try wip.block(1, "BadValue"); + const tag_int_value = wip.arg(0); + var wip_switch = + try wip.@"switch"(tag_int_value, bad_value_block, @intCast(enum_type.names.len)); + defer wip_switch.finish(&wip); for (enum_type.names, 0..) |name_ip, field_index| { const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip)); @@ -9398,46 +9411,45 @@ pub const FuncGen = struct { .linkage = .private, .unnamed_addr = .unnamed_addr, .type = str_ty, - .alignment = comptime Builder.Alignment.fromByteUnits(1), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; var str_variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = .constant, .init = str_init, + .alignment = comptime Builder.Alignment.fromByteUnits(1), }; try o.builder.llvm.globals.append(o.gpa, str_llvm_global); const global_index = try o.builder.addGlobal(.empty, str_global); try o.builder.variables.append(o.gpa, str_variable); - const slice_val = try o.builder.structConst(ret_ty, &.{ + const slice_val = try o.builder.structValue(ret_ty, &.{ global_index.toConst(), try o.builder.intConst(usize_ty, name.toSlice(&o.builder).?.len), }); - const return_block = try wip.block("Name"); - const this_tag_int_value = - try o.lowerValue((try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern()); - switch_instr.addCase(this_tag_int_value.toLlvm(&o.builder), return_block.toLlvm(&wip)); + const return_block = try wip.block(1, "Name"); + const this_tag_int_value = try o.lowerValue( + (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + ); + try wip_switch.addCase(this_tag_int_value, return_block, &wip); wip.cursor = .{ .block = return_block }; - self.builder.positionBuilderAtEnd(return_block.toLlvm(&wip)); - _ = self.builder.buildRet(slice_val.toLlvm(&o.builder)); + _ = try wip.ret(slice_val); } wip.cursor = .{ .block = bad_value_block }; - self.builder.positionBuilderAtEnd(bad_value_block.toLlvm(&wip)); - _ = self.builder.buildUnreachable(); + _ = try wip.@"unreachable"(); try wip.finish(); - return fn_val; + return 
global.kind.function; } - fn getCmpLtErrorsLenFunction(self: *FuncGen) !*llvm.Value { + fn getCmpLtErrorsLenFunction(self: *FuncGen) !Builder.Function.Index { const o = self.dg.object; const name = try o.builder.string(lt_errors_fn_name); - if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.toLlvm(&o.builder); + if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; // Function signature: fn (anyerror) bool @@ -9458,47 +9470,45 @@ pub const FuncGen = struct { }; try o.builder.llvm.globals.append(self.gpa, llvm_fn); - const global_index = try o.builder.addGlobal(name, global); + _ = try o.builder.addGlobal(name, global); try o.builder.functions.append(self.gpa, function); - return global_index.toLlvm(&o.builder); + return global.kind.function; } - fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const slice_ty = self.typeOfIndex(inst); - const slice_llvm_ty = (try o.lowerType(slice_ty)).toLlvm(&o.builder); + const slice_llvm_ty = try o.lowerType(slice_ty); const error_name_table_ptr = try self.getErrorNameTable(); - const ptr_slice_llvm_ty = self.context.pointerType(0); - const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr.toLlvm(&o.builder), ""); - const indices = [_]*llvm.Value{operand}; - const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, ""); - return self.builder.buildLoad(slice_llvm_ty, error_name_ptr, ""); + const error_name_table = + try self.wip.load(.normal, .ptr, error_name_table_ptr.toValue(&o.builder), .default, ""); + const error_name_ptr = + try self.wip.gep(.inbounds, slice_llvm_ty, error_name_table, &.{operand}, ""); + return self.wip.load(.normal, slice_llvm_ty, error_name_ptr, .default, ""); } - fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(mod); - return self.builder.buildVectorSplat(len, scalar, ""); + return self.wip.splatVector(try o.lowerType(vector_ty), scalar, ""); } - fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const pred = try self.resolveInst(pl_op.operand); const a = try self.resolveInst(extra.lhs); const b = try self.resolveInst(extra.rhs); - return self.builder.buildSelect(pred, a, b, ""); + return self.wip.select(pred, a, b, ""); } - fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -9528,11 +9538,11 @@ pub const FuncGen = struct { } } - const llvm_mask_value = try o.builder.vectorConst( + const llvm_mask_value = try o.builder.vectorValue( try o.builder.vectorType(.normal, mask_len, .i32), values, ); - return self.builder.buildShuffleVector(a, b, 
llvm_mask_value.toLlvm(&o.builder), ""); + return self.wip.shuffleVector(a, b, llvm_mask_value, ""); } /// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result. @@ -9549,61 +9559,69 @@ pub const FuncGen = struct { /// fn buildReducedCall( self: *FuncGen, - llvm_fn: *llvm.Value, - operand_vector: *llvm.Value, + llvm_fn: Builder.Function.Index, + operand_vector: Builder.Value, vector_len: usize, - accum_init: *llvm.Value, - ) !*llvm.Value { + accum_init: Builder.Value, + ) !Builder.Value { const o = self.dg.object; const usize_ty = try o.lowerType(Type.usize); - const llvm_vector_len = try o.builder.intConst(usize_ty, vector_len); - const llvm_result_ty = accum_init.typeOf(); + const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len); + const llvm_result_ty = accum_init.typeOfWip(&self.wip); // Allocate and initialize our mutable variables - const i_ptr = try self.buildAlloca(usize_ty.toLlvm(&o.builder), null); - _ = self.builder.buildStore((try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), i_ptr); - const accum_ptr = try self.buildAlloca(llvm_result_ty, null); - _ = self.builder.buildStore(accum_init, accum_ptr); + const i_ptr = try self.buildAlloca(usize_ty, .default); + _ = try self.wip.store(.normal, try o.builder.intValue(usize_ty, 0), i_ptr, .default); + const accum_ptr = try self.buildAlloca(llvm_result_ty, .default); + _ = try self.wip.store(.normal, accum_init, accum_ptr, .default); // Setup the loop - const loop = try self.wip.block("ReduceLoop"); - const loop_exit = try self.wip.block("AfterReduce"); - _ = self.builder.buildBr(loop.toLlvm(&self.wip)); + const loop = try self.wip.block(2, "ReduceLoop"); + const loop_exit = try self.wip.block(1, "AfterReduce"); + _ = try self.wip.br(loop); { self.wip.cursor = .{ .block = loop }; - self.builder.positionBuilderAtEnd(loop.toLlvm(&self.wip)); // while (i < vec.len) - const i = self.builder.buildLoad(usize_ty.toLlvm(&o.builder), i_ptr, ""); - const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len.toLlvm(&o.builder), ""); - const loop_then = try self.wip.block("ReduceLoopThen"); + const i = try self.wip.load(.normal, usize_ty, i_ptr, .default, ""); + const cond = try self.wip.icmp(.ult, i, llvm_vector_len, ""); + const loop_then = try self.wip.block(1, "ReduceLoopThen"); - _ = self.builder.buildCondBr(cond, loop_then.toLlvm(&self.wip), loop_exit.toLlvm(&self.wip)); + _ = try self.wip.brCond(cond, loop_then, loop_exit); { self.wip.cursor = .{ .block = loop_then }; - self.builder.positionBuilderAtEnd(loop_then.toLlvm(&self.wip)); // accum = f(accum, vec[i]); - const accum = self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); - const element = self.builder.buildExtractElement(operand_vector, i, ""); - const params = [2]*llvm.Value{ accum, element }; - const new_accum = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, ¶ms, params.len, .C, .Auto, ""); - _ = self.builder.buildStore(new_accum, accum_ptr); + const accum = try self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, ""); + const element = try self.wip.extractElement(operand_vector, i, ""); + const params = [2]*llvm.Value{ accum.toLlvm(&self.wip), element.toLlvm(&self.wip) }; + const new_accum = (try self.wip.unimplemented(llvm_result_ty, "")).finish( + self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); + _ = try self.wip.store(.normal, new_accum, accum_ptr, .default); // i += 1 - const new_i = 
self.builder.buildAdd(i, (try o.builder.intConst(usize_ty, 1)).toLlvm(&o.builder), ""); - _ = self.builder.buildStore(new_i, i_ptr); - _ = self.builder.buildBr(loop.toLlvm(&self.wip)); + const new_i = try self.wip.bin(.add, i, try o.builder.intValue(usize_ty, 1), ""); + _ = try self.wip.store(.normal, new_i, i_ptr, .default); + _ = try self.wip.br(loop); } } self.wip.cursor = .{ .block = loop_exit }; - self.builder.positionBuilderAtEnd(loop_exit.toLlvm(&self.wip)); - return self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); + return self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, ""); } - fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; const mod = o.module; @@ -9613,40 +9631,70 @@ pub const FuncGen = struct { const operand = try self.resolveInst(reduce.operand); const operand_ty = self.typeOf(reduce.operand); const scalar_ty = self.typeOfIndex(inst); + const llvm_scalar_ty = try o.lowerType(scalar_ty); switch (reduce.operation) { - .And => return self.builder.buildAndReduce(operand), - .Or => return self.builder.buildOrReduce(operand), - .Xor => return self.builder.buildXorReduce(operand), + .And => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildAndReduce(operand.toLlvm(&self.wip)), &self.wip), + .Or => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildOrReduce(operand.toLlvm(&self.wip)), &self.wip), + .Xor => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildXorReduce(operand.toLlvm(&self.wip)), &self.wip), .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildIntMinReduce( + operand.toLlvm(&self.wip), + scalar_ty.isSignedInt(mod), + ), + &self.wip, + ), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return self.builder.buildFPMinReduce(operand); + return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildFPMinReduce(operand.toLlvm(&self.wip)), &self.wip); }, else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildIntMaxReduce( + operand.toLlvm(&self.wip), + scalar_ty.isSignedInt(mod), + ), + &self.wip, + ), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return self.builder.buildFPMaxReduce(operand); + return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildFPMaxReduce(operand.toLlvm(&self.wip)), &self.wip); }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildAddReduce(operand), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildAddReduce(operand.toLlvm(&self.wip)), &self.wip), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); - const neutral_value = try o.builder.fpConst(scalar_llvm_ty, -0.0); - return self.builder.buildFPAddReduce(neutral_value.toLlvm(&o.builder), operand); + const neutral_value = try o.builder.fpConst(llvm_scalar_ty, -0.0); + return (try 
self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildFPAddReduce( + neutral_value.toLlvm(&o.builder), + operand.toLlvm(&self.wip), + ), + &self.wip, + ); }, else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildMulReduce(operand), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildMulReduce(operand.toLlvm(&self.wip)), &self.wip), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); - const neutral_value = try o.builder.fpConst(scalar_llvm_ty, 1.0); - return self.builder.buildFPMulReduce(neutral_value.toLlvm(&o.builder), operand); + const neutral_value = try o.builder.fpConst(llvm_scalar_ty, 1.0); + return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildFPMulReduce( + neutral_value.toLlvm(&o.builder), + operand.toLlvm(&self.wip), + ), + &self.wip, + ); }, else => unreachable, }, @@ -9671,34 +9719,54 @@ pub const FuncGen = struct { else => unreachable, }; - const param_llvm_ty = try o.lowerType(scalar_ty); - const libc_fn = try self.getLibcFunction(fn_name, &(.{param_llvm_ty} ** 2), param_llvm_ty); - const init_value = try o.lowerValue((try mod.floatValue(scalar_ty, switch (reduce.operation) { - .Min => std.math.nan(f32), - .Max => std.math.nan(f32), - .Add => -0.0, - .Mul => 1.0, + const libc_fn = + try self.getLibcFunction(fn_name, &.{ llvm_scalar_ty, llvm_scalar_ty }, llvm_scalar_ty); + const init_val = switch (llvm_scalar_ty) { + .i16 => try o.builder.intValue(.i16, @as(i16, @bitCast( + @as(f16, switch (reduce.operation) { + .Min, .Max => std.math.nan(f16), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), + .i80 => try o.builder.intValue(.i80, @as(i80, @bitCast( + @as(f80, switch (reduce.operation) { + .Min, .Max => std.math.nan(f80), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), + .i128 => try o.builder.intValue(.i128, @as(i128, @bitCast( + @as(f128, switch (reduce.operation) { + .Min, .Max => std.math.nan(f128), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), else => unreachable, - })).toIntern()); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value.toLlvm(&o.builder)); + }; + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_val); } - fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); const len: usize = @intCast(result_ty.arrayLen(mod)); const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); - const llvm_result_ty = (try o.lowerType(result_ty)).toLlvm(&o.builder); + const llvm_result_ty = try o.lowerType(result_ty); switch (result_ty.zigTypeTag(mod)) { .Vector => { - var vector = llvm_result_ty.getUndef(); + var vector = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) 
|elem, i| { - const index_u32 = try o.builder.intConst(.i32, i); + const index_u32 = try o.builder.intValue(.i32, i); const llvm_elem = try self.resolveInst(elem); - vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32.toLlvm(&o.builder), ""); + vector = try self.wip.insertElement(vector, llvm_elem, index_u32, ""); } return vector; }, @@ -9710,7 +9778,7 @@ pub const FuncGen = struct { const int_ty = try o.builder.intType(@intCast(big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int = (try o.builder.intConst(int_ty, 0)).toLlvm(&o.builder); + var running_int = try o.builder.intValue(int_ty, 0); var running_bits: u16 = 0; for (elements, 0..) |elem, i| { const field = fields[i]; @@ -9718,18 +9786,18 @@ pub const FuncGen = struct { const non_int_val = try self.resolveInst(elem); const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod)); - const small_int_ty = (try o.builder.intType(ty_bit_size)).toLlvm(&o.builder); + const small_int_ty = try o.builder.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(non_int_val, small_int_ty, "") + try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else - self.builder.buildBitCast(non_int_val, small_int_ty, ""); - const shift_rhs = try o.builder.intConst(int_ty, running_bits); + try self.wip.cast(.bitcast, non_int_val, small_int_ty, ""); + const shift_rhs = try o.builder.intValue(int_ty, running_bits); // If the field is as large as the entire packed struct, this // zext would go from, e.g. i16 to i16. This is legal with // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = self.builder.buildZExtOrBitCast(small_int_val, int_ty.toLlvm(&o.builder), ""); - const shifted = self.builder.buildShl(extended_int_val, shift_rhs.toLlvm(&o.builder), ""); - running_int = self.builder.buildOr(running_int, shifted, ""); + const extended_int_val = try self.wip.conv(.unsigned, small_int_val, int_ty, ""); + const shifted = try self.wip.bin(.shl, extended_int_val, shift_rhs, ""); + running_int = try self.wip.bin(.@"or", running_int, shifted, ""); running_bits += ty_bit_size; } return running_int; @@ -9738,19 +9806,16 @@ pub const FuncGen = struct { if (isByRef(result_ty, mod)) { // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment); - var indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - undefined, - }; for (elements, 0..) 
|elem, i| { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmField(result_ty, i, mod).?.index; - indices[1] = (try o.builder.intConst(.i32, llvm_i)).toLlvm(&o.builder); - const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const field_ptr = + try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, ""); const field_ptr_ty = try mod.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ @@ -9759,18 +9824,18 @@ pub const FuncGen = struct { ), }, }); - try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); + try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); } return alloca_inst; } else { - var result = llvm_result_ty.getUndef(); + var result = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmField(result_ty, i, mod).?.index; - result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); + result = try self.wip.insertValue(result, llvm_elem, &.{llvm_i}, ""); } return result; } @@ -9778,8 +9843,10 @@ pub const FuncGen = struct { .Array => { assert(isByRef(result_ty, mod)); - const usize_ty = try o.lowerType(Type.usize); - const alloca_inst = try self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const llvm_usize = try o.lowerType(Type.usize); + const usize_zero = try o.builder.intValue(llvm_usize, 0); + const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment); const array_info = result_ty.arrayInfo(mod); const elem_ptr_ty = try mod.ptrType(.{ @@ -9787,26 +9854,21 @@ pub const FuncGen = struct { }); for (elements, 0..) 
|elem, i| { - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), - (try o.builder.intConst(usize_ty, i)).toLlvm(&o.builder), - }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); const llvm_elem = try self.resolveInst(elem); - try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic); + try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .none); } if (array_info.sentinel) |sent_val| { - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder), - (try o.builder.intConst(usize_ty, array_info.len)).toLlvm(&o.builder), - }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{ + usize_zero, try o.builder.intValue(llvm_usize, array_info.len), + }, ""); const llvm_elem = try self.resolveValue(.{ .ty = array_info.elem_type, .val = sent_val, }); - - try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toLlvm(&o.builder), .NotAtomic); + try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toValue(), .none); } return alloca_inst; @@ -9815,7 +9877,7 @@ pub const FuncGen = struct { } } - fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -9827,15 +9889,15 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(mod); - const int_llvm_ty = (try o.builder.intType(@intCast(big_bits))).toLlvm(&o.builder); + const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const small_int_ty = (try o.builder.intType(@intCast(field.ty.bitSize(mod)))).toLlvm(&o.builder); + const small_int_ty = try o.builder.intType(@intCast(field.ty.bitSize(mod))); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(non_int_val, small_int_ty, "") + try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else - self.builder.buildBitCast(non_int_val, small_int_ty, ""); - return self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, ""); + try self.wip.cast(.bitcast, non_int_val, small_int_ty, ""); + return self.wip.conv(.unsigned, small_int_val, int_llvm_ty, ""); } const tag_int = blk: { @@ -9848,25 +9910,29 @@ pub const FuncGen = struct { }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { - return null; + return .none; } assert(!isByRef(union_ty, mod)); - return (try o.builder.intConst(union_llvm_ty, tag_int)).toLlvm(&o.builder); + return o.builder.intValue(union_llvm_ty, tag_int); } assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set // the fields appropriately. 
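        // A standalone sketch of the ordering rule the GEP indices below rely on;
        // the helper names and bare integer alignment parameters are hypothetical,
        // for illustration only:

        fn unionPayloadFieldIndex(tag_align: u32, payload_align: u32) u32 {
            // The member with the greater alignment is placed first in the unnamed
            // struct type; ties go to the tag, so the payload lands at index 1
            // exactly when tag_align >= payload_align.
            return @intFromBool(tag_align >= payload_align);
        }

        fn unionTagFieldIndex(tag_align: u32, payload_align: u32) u32 {
            return @intFromBool(tag_align < payload_align);
        }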
- const result_ptr = try self.buildAlloca(union_llvm_ty.toLlvm(&o.builder), layout.abi_align); + const alignment = Builder.Alignment.fromByteUnits(layout.abi_align); + const result_ptr = try self.buildAlloca(union_llvm_ty, alignment); const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; const field_llvm_ty = try o.lowerType(field.ty); const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); + const llvm_usize = try o.lowerType(Type.usize); + const usize_zero = try o.builder.intValue(llvm_usize, 0); + const i32_zero = try o.builder.intValue(.i32, 0); - const llvm_union_ty = (t: { + const llvm_union_ty = t: { const payload_ty = p: { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = layout.payload_size; @@ -9894,52 +9960,46 @@ pub const FuncGen = struct { fields_len += 1; } break :t try o.builder.structType(.normal, fields[0..fields_len]); - }).toLlvm(&o.builder); + }; // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. const field_ptr_ty = try mod.ptrType(.{ .child = field.ty.toIntern(), - .flags = .{ - .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), - }, + .flags = .{ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align) }, }); if (layout.tag_size == 0) { - const indices: [3]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - } ** 3; - const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); - try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); + const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero }; + const len: usize = if (field_size == layout.payload_size) 2 else 3; + const field_ptr = + try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], ""); + try self.store(field_ptr, field_ptr_ty, llvm_payload, .none); return result_ptr; } { - const indices: [3]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, @intFromBool(layout.tag_align >= layout.payload_align))).toLlvm(&o.builder), - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - }; - const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); - try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); + const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); + const indices: [3]Builder.Value = + .{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero }; + const len: usize = if (field_size == layout.payload_size) 2 else 3; + const field_ptr = + try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], ""); + try self.store(field_ptr, field_ptr_ty, llvm_payload, .none); } { - const indices: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, @intFromBool(layout.tag_align < layout.payload_align))).toLlvm(&o.builder), - }; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); + const tag_index = @intFromBool(layout.tag_align < layout.payload_align); + const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) }; + const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, 
result_ptr, &indices, ""); const tag_ty = try o.lowerType(union_obj.tag_ty); - const llvm_tag = try o.builder.intConst(tag_ty, tag_int); - const store_inst = self.builder.buildStore(llvm_tag.toLlvm(&o.builder), field_ptr); - store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); + const llvm_tag = try o.builder.intValue(tag_ty, tag_int); + const tag_alignment = Builder.Alignment.fromByteUnits(union_obj.tag_ty.abiAlignment(mod)); + _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment); } return result_ptr; } - fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const prefetch = self.air.instructions.items(.data)[inst].prefetch; @@ -9968,10 +10028,10 @@ pub const FuncGen = struct { .powerpcle, .powerpc64, .powerpc64le, - => return null, + => return .none, .arm, .armeb, .thumb, .thumbeb => { switch (prefetch.rw) { - .write => return null, + .write => return .none, else => {}, } }, @@ -9981,48 +10041,63 @@ pub const FuncGen = struct { } const llvm_fn_name = "llvm.prefetch.p0"; - const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - // declare void @llvm.prefetch(i8*, i32, i32, i32) - const fn_type = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); - }; + // declare void @llvm.prefetch(i8*, i32, i32, i32) + const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal); + const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); const ptr = try self.resolveInst(prefetch.ptr); const params = [_]*llvm.Value{ - ptr, + ptr.toLlvm(&self.wip), (try o.builder.intConst(.i32, @intFromEnum(prefetch.rw))).toLlvm(&o.builder), (try o.builder.intConst(.i32, prefetch.locality)).toLlvm(&o.builder), (try o.builder.intConst(.i32, @intFromEnum(prefetch.cache))).toLlvm(&o.builder), }; - _ = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); - return null; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); + return .none; } - fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const llvm_dest_ty = (try o.lowerType(inst_ty)).toLlvm(&o.builder); - return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, ""); + return self.wip.cast(.addrspacecast, operand, try o.lowerType(inst_ty), ""); } - fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value { + fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !Builder.Value { + const o = self.dg.object; const llvm_fn_name = switch (dimension) { 0 => basename ++ ".x", 1 => basename ++ ".y", 2 => basename ++ ".z", - else => return (try self.dg.object.builder.intConst(.i32, default)).toLlvm(&self.dg.object.builder), + else => return o.builder.intValue(.i32, default), }; const args: [0]*llvm.Value = .{}; const llvm_fn = try self.getIntrinsic(llvm_fn_name, &.{}); - return 
self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10032,38 +10107,41 @@ pub const FuncGen = struct { return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workitem.id"); } - fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - if (dimension >= 3) { - return (try o.builder.intConst(.i32, 1)).toLlvm(&o.builder); - } + if (dimension >= 3) return o.builder.intValue(.i32, 1); // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 const llvm_fn = try self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); const args: [0]*llvm.Value = .{}; - const dispatch_ptr = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - dispatch_ptr.setAlignment(4); + const llvm_ret_ty = try o.builder.ptrType(Builder.AddrSpace.amdgpu.constant); + const dispatch_ptr = (try self.wip.unimplemented(llvm_ret_ty, "")).finish(self.builder.buildCall( + (try o.builder.fnType(llvm_ret_ty, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + o.addAttrInt(dispatch_ptr.toLlvm(&self.wip), 0, "align", 4); // Load the work_group_* member from the struct as u16. // Just treat the dispatch pointer as an array of u16 to keep things simple. 
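        // For reference on the index computed below: per the hsa.h header linked
        // above, the dispatch packet begins with two u16 fields (header, setup)
        // followed by workgroup_size_x/y/z as u16, so viewing the packet as an
        // array of u16 places the size for `dimension` at index 2 + dimension.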
- const offset = 2 + dimension; - const index = [_]*llvm.Value{ - (try o.builder.intConst(.i32, offset)).toLlvm(&o.builder), - }; - const llvm_u16 = Builder.Type.i16.toLlvm(&o.builder); - const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, ""); - const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, ""); - workgroup_size.setAlignment(2); - return workgroup_size; + const workgroup_size_ptr = try self.wip.gep(.inbounds, .i16, dispatch_ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 2 + dimension), + }, ""); + const workgroup_size_alignment = comptime Builder.Alignment.fromByteUnits(2); + return self.wip.load(.normal, .i16, workgroup_size_ptr, workgroup_size_alignment, ""); } - fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10095,13 +10173,13 @@ pub const FuncGen = struct { .linkage = .private, .unnamed_addr = .unnamed_addr, .type = .ptr, - .alignment = Builder.Alignment.fromByteUnits(slice_alignment), .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; var variable = Builder.Variable{ .global = @enumFromInt(o.builder.globals.count()), .mutability = .constant, .init = undef_init, + .alignment = Builder.Alignment.fromByteUnits(slice_alignment), }; try o.builder.llvm.globals.append(o.gpa, error_name_table_global); _ = try o.builder.addGlobal(name, global); @@ -10112,97 +10190,95 @@ pub const FuncGen = struct { } /// Assumes the optional is not pointer-like and payload has bits. - fn optIsNonNull( + fn optCmpNull( self: *FuncGen, - opt_llvm_ty: *llvm.Type, - opt_handle: *llvm.Value, + cond: Builder.IntegerCondition, + opt_llvm_ty: Builder.Type, + opt_handle: Builder.Value, is_by_ref: bool, - ) Allocator.Error!*llvm.Value { + ) Allocator.Error!Builder.Value { + const o = self.dg.object; const field = b: { if (is_by_ref) { - const field_ptr = self.builder.buildStructGEP(opt_llvm_ty, opt_handle, 1, ""); - break :b self.builder.buildLoad(Builder.Type.i8.toLlvm(&self.dg.object.builder), field_ptr, ""); + const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, ""); + break :b try self.wip.load(.normal, .i8, field_ptr, .default, ""); } - break :b self.builder.buildExtractValue(opt_handle, 1, ""); + break :b try self.wip.extractValue(opt_handle, &.{1}, ""); }; comptime assert(optional_layout_version == 3); - return self.builder.buildICmp(.NE, field, (try self.dg.object.builder.intConst(.i8, 0)).toLlvm(&self.dg.object.builder), ""); + return self.wip.icmp(cond, field, try o.builder.intValue(.i8, 0), ""); } /// Assumes the optional is not pointer-like and payload has bits. fn optPayloadHandle( fg: *FuncGen, - opt_llvm_ty: *llvm.Type, - opt_handle: *llvm.Value, + opt_llvm_ty: Builder.Type, + opt_handle: Builder.Value, opt_ty: Type, can_elide_load: bool, - ) !*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; const payload_ty = opt_ty.optionalChild(mod); if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. 
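        // (By-ref optionals are laid out as { payload, i8 }, with the non-null flag
        // in field 1 and the payload in field 0; see optCmpNull and buildOptional.)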
- const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, ""); + const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, ""); - const payload_alignment = payload_ty.abiAlignment(mod); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const payload_llvm_ty = (try o.lowerType(payload_ty)).toLlvm(&o.builder); - const load_inst = fg.builder.buildLoad(payload_llvm_ty, payload_ptr, ""); - load_inst.setAlignment(payload_alignment); - return load_inst; + const payload_llvm_ty = try o.lowerType(payload_ty); + return fg.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); } assert(!isByRef(payload_ty, mod)); - return fg.builder.buildExtractValue(opt_handle, 0, ""); + return fg.wip.extractValue(opt_handle, &.{0}, ""); } fn buildOptional( self: *FuncGen, optional_ty: Type, - payload: *llvm.Value, - non_null_bit: *llvm.Value, - ) !?*llvm.Value { + payload: Builder.Value, + non_null_bit: Builder.Value, + ) !Builder.Value { const o = self.dg.object; - const optional_llvm_ty = (try o.lowerType(optional_ty)).toLlvm(&o.builder); - const non_null_field = self.builder.buildZExt(non_null_bit, Builder.Type.i8.toLlvm(&o.builder), ""); + const optional_llvm_ty = try o.lowerType(optional_ty); + const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); const mod = o.module; if (isByRef(optional_ty, mod)) { - const payload_alignment = optional_ty.abiAlignment(mod); + const payload_alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod)); const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment); { - const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 0, ""); - const store_inst = self.builder.buildStore(payload, field_ptr); - store_inst.setAlignment(payload_alignment); + const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 0, ""); + _ = try self.wip.store(.normal, payload, field_ptr, payload_alignment); } { - const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 1, ""); - const store_inst = self.builder.buildStore(non_null_field, field_ptr); - store_inst.setAlignment(1); + const non_null_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 1, ""); + _ = try self.wip.store(.normal, non_null_field, field_ptr, non_null_alignment); } return alloca_inst; } - const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, ""); - return self.builder.buildInsertValue(partial, non_null_field, 1, ""); + return self.wip.buildAggregate(optional_llvm_ty, &.{ payload, non_null_field }, ""); } fn fieldPtr( self: *FuncGen, inst: Air.Inst.Index, - struct_ptr: *llvm.Value, + struct_ptr: Builder.Value, struct_ptr_ty: Type, field_index: u32, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const struct_ty = struct_ptr_ty.childType(mod); @@ -10224,25 +10300,25 @@ pub const FuncGen = struct { // Offset our operand pointer by the correct number of bytes. 
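        // (A packed struct is backed by a single integer, so a field pointer is the
        // base pointer advanced by the field's byte offset via a plain i8 GEP.)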
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; - const byte_llvm_ty = Builder.Type.i8.toLlvm(&o.builder); const usize_ty = try o.lowerType(Type.usize); - const llvm_index = try o.builder.intConst(usize_ty, byte_offset); - const indices: [1]*llvm.Value = .{llvm_index.toLlvm(&o.builder)}; - return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, ""); + const llvm_index = try o.builder.intValue(usize_ty, byte_offset); + return self.wip.gep(.inbounds, .i8, struct_ptr, &.{llvm_index}, ""); }, else => { - const struct_llvm_ty = (try o.lowerPtrElemTy(struct_ty)).toLlvm(&o.builder); + const struct_llvm_ty = try o.lowerPtrElemTy(struct_ty); if (llvmField(struct_ty, field_index, mod)) |llvm_field| { - return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, ""); + return self.wip.gepStruct(struct_llvm_ty, struct_ptr, llvm_field.index, ""); } else { // If we found no index then this means this is a zero sized field at the // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. - const llvm_index = try o.builder.intConst(.i32, @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod))); - const indices: [1]*llvm.Value = .{llvm_index.toLlvm(&o.builder)}; - return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); + const llvm_index = try o.builder.intValue( + try o.lowerType(Type.usize), + @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), + ); + return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, ""); } }, }, @@ -10250,15 +10326,18 @@ pub const FuncGen = struct { const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr; const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); - const union_llvm_ty = (try o.lowerType(struct_ty)).toLlvm(&o.builder); - const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); - return union_field_ptr; + const union_llvm_ty = try o.lowerType(struct_ty); + return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, ""); }, else => unreachable, } } - fn getIntrinsic(fg: *FuncGen, name: []const u8, types: []const Builder.Type) Allocator.Error!*llvm.Value { + fn getIntrinsic( + fg: *FuncGen, + name: []const u8, + types: []const Builder.Type, + ) Allocator.Error!*llvm.Value { const o = fg.dg.object; const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); @@ -10271,109 +10350,105 @@ pub const FuncGen = struct { /// Load a by-ref type by constructing a new alloca and performing a memcpy. 
fn loadByRef( fg: *FuncGen, - ptr: *llvm.Value, + ptr: Builder.Value, pointee_type: Type, - ptr_alignment: u32, + ptr_alignment: Builder.Alignment, is_volatile: bool, - ) !*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; - const pointee_llvm_ty = (try o.lowerType(pointee_type)).toLlvm(&o.builder); - const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); + const pointee_llvm_ty = try o.lowerType(pointee_type); + const result_align = Builder.Alignment.fromByteUnits( + @max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)), + ); const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align); const usize_ty = try o.lowerType(Type.usize); const size_bytes = pointee_type.abiSize(mod); - _ = fg.builder.buildMemCpy( - result_ptr, - result_align, - ptr, - ptr_alignment, + _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildMemCpy( + result_ptr.toLlvm(&fg.wip), + @intCast(result_align.toByteUnits() orelse 0), + ptr.toLlvm(&fg.wip), + @intCast(ptr_alignment.toByteUnits() orelse 0), (try o.builder.intConst(usize_ty, size_bytes)).toLlvm(&o.builder), is_volatile, - ); + ), &fg.wip); return result_ptr; } /// This function always performs a copy. For isByRef=true types, it creates a new /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. - fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { + fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { const o = self.dg.object; const mod = o.module; const info = ptr_ty.ptrInfo(mod); const elem_ty = info.child.toType(); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; - const ptr_alignment: u32 = @intCast(info.flags.alignment.toByteUnitsOptional() orelse - elem_ty.abiAlignment(mod)); - const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); + const ptr_alignment = Builder.Alignment.fromByteUnits( + info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod), + ); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + true => .@"volatile", + }; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = try o.builder.intConst(.i32, @intFromEnum(info.flags.vector_index)); + const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty.toLlvm(&o.builder), ptr, ""); - loaded_vector.setAlignment(ptr_alignment); - loaded_vector.setVolatile(ptr_volatile); - - return self.builder.buildExtractElement(loaded_vector, index_u32.toLlvm(&o.builder), ""); + const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); + return self.wip.extractElement(loaded_vector, index_u32, ""); } if (info.packed_offset.host_size == 0) { if (isByRef(elem_ty, mod)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile); } - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); - const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); - llvm_inst.setAlignment(ptr_alignment); - llvm_inst.setVolatile(ptr_volatile); - return llvm_inst; + return self.wip.load(ptr_kind, try o.lowerType(elem_ty), ptr, 
ptr_alignment, ""); } const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); - const containing_int = self.builder.buildLoad(containing_int_ty.toLlvm(&o.builder), ptr, ""); - containing_int.setAlignment(ptr_alignment); - containing_int.setVolatile(ptr_volatile); + const containing_int = try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); const elem_bits = ptr_ty.childType(mod).bitSize(mod); - const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); - const shifted_value = self.builder.buildLShr(containing_int, shift_amt.toLlvm(&o.builder), ""); - const elem_llvm_ty = (try o.lowerType(elem_ty)).toLlvm(&o.builder); + const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); + const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); + const elem_llvm_ty = try o.lowerType(elem_ty); if (isByRef(elem_ty, mod)) { - const result_align = elem_ty.abiAlignment(mod); + const result_align = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align); - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - const store_inst = self.builder.buildStore(truncated_int, result_ptr); - store_inst.setAlignment(result_align); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + _ = try self.wip.store(.normal, truncated_int, result_ptr, result_align); return result_ptr; } if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } if (elem_ty.isPtrAtRuntime(mod)) { - const same_size_int = (try o.builder.intType(@intCast(elem_bits))).toLlvm(&o.builder); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); + return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, ""); } fn store( self: *FuncGen, - ptr: *llvm.Value, + ptr: Builder.Value, ptr_ty: Type, - elem: *llvm.Value, - ordering: llvm.AtomicOrdering, + elem: Builder.Value, + ordering: Builder.AtomicOrdering, ) !void { const o = self.dg.object; const mod = o.module; @@ -10382,43 +10457,41 @@ pub const FuncGen = struct { if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } - const ptr_alignment = ptr_ty.ptrAlignment(mod); - const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); + const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + 
true => .@"volatile", + }; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = try o.builder.intConst(.i32, @intFromEnum(info.flags.vector_index)); + const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty.toLlvm(&o.builder), ptr, ""); - loaded_vector.setAlignment(ptr_alignment); - loaded_vector.setVolatile(ptr_volatile); + const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); - const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32.toLlvm(&o.builder), ""); + const modified_vector = try self.wip.insertElement(loaded_vector, elem, index_u32, ""); - const store_inst = self.builder.buildStore(modified_vector, ptr); - assert(ordering == .NotAtomic); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + assert(ordering == .none); + _ = try self.wip.store(ptr_kind, modified_vector, ptr, ptr_alignment); return; } if (info.packed_offset.host_size != 0) { const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); - const containing_int = self.builder.buildLoad(containing_int_ty.toLlvm(&o.builder), ptr, ""); - assert(ordering == .NotAtomic); - containing_int.setAlignment(ptr_alignment); - containing_int.setVolatile(ptr_volatile); + assert(ordering == .none); + const containing_int = + try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); const elem_bits = ptr_ty.childType(mod).bitSize(mod); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store const value_bits_type = try o.builder.intType(@intCast(elem_bits)); const value_bits = if (elem_ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(elem, value_bits_type.toLlvm(&o.builder), "") + try self.wip.cast(.ptrtoint, elem, value_bits_type, "") else - self.builder.buildBitCast(elem, value_bits_type.toLlvm(&o.builder), ""); + try self.wip.cast(.bitcast, elem, value_bits_type, ""); var mask_val = try o.builder.intConst(value_bits_type, -1); mask_val = try o.builder.castConst(.zext, mask_val, containing_int_ty); @@ -10426,79 +10499,73 @@ pub const FuncGen = struct { mask_val = try o.builder.binConst(.xor, mask_val, try o.builder.intConst(containing_int_ty, -1)); - const anded_containing_int = self.builder.buildAnd(containing_int, mask_val.toLlvm(&o.builder), ""); - const extended_value = self.builder.buildZExt(value_bits, containing_int_ty.toLlvm(&o.builder), ""); - const shifted_value = self.builder.buildShl(extended_value, shift_amt.toLlvm(&o.builder), ""); - const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, ""); + const anded_containing_int = + try self.wip.bin(.@"and", containing_int, mask_val.toValue(), ""); + const extended_value = try self.wip.cast(.zext, value_bits, containing_int_ty, ""); + const shifted_value = try self.wip.bin(.shl, extended_value, shift_amt.toValue(), ""); + const ored_value = try self.wip.bin(.@"or", shifted_value, anded_containing_int, ""); - const store_inst = self.builder.buildStore(ored_value, ptr); - assert(ordering == .NotAtomic); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + assert(ordering == .none); + 
_ = try self.wip.store(ptr_kind, ored_value, ptr, ptr_alignment); return; } if (!isByRef(elem_ty, mod)) { - const store_inst = self.builder.buildStore(elem, ptr); - store_inst.setOrdering(ordering); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + _ = try self.wip.storeAtomic(ptr_kind, elem, ptr, self.sync_scope, ordering, ptr_alignment); return; } - assert(ordering == .NotAtomic); + assert(ordering == .none); const size_bytes = elem_ty.abiSize(mod); - _ = self.builder.buildMemCpy( - ptr, - ptr_alignment, - elem, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + ptr.toLlvm(&self.wip), + @intCast(ptr_alignment.toByteUnits() orelse 0), + elem.toLlvm(&self.wip), elem_ty.abiAlignment(mod), (try o.builder.intConst(try o.lowerType(Type.usize), size_bytes)).toLlvm(&o.builder), info.flags.is_volatile, - ); + ), &self.wip); } - fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) Allocator.Error!void { + fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; const o = fg.dg.object; const usize_ty = try o.lowerType(Type.usize); - const zero = (try o.builder.intConst(usize_ty, 0)).toLlvm(&o.builder); - const req = (try o.builder.intConst(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED)).toLlvm(&o.builder); - const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_ty.toLlvm(&o.builder), ""); + const zero = try o.builder.intValue(usize_ty, 0); + const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED); + const ptr_as_usize = try fg.wip.cast(.ptrtoint, ptr, usize_ty, ""); _ = try valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); } fn valgrindClientRequest( fg: *FuncGen, - default_value: *llvm.Value, - request: *llvm.Value, - a1: *llvm.Value, - a2: *llvm.Value, - a3: *llvm.Value, - a4: *llvm.Value, - a5: *llvm.Value, - ) Allocator.Error!*llvm.Value { + default_value: Builder.Value, + request: Builder.Value, + a1: Builder.Value, + a2: Builder.Value, + a3: Builder.Value, + a4: Builder.Value, + a5: Builder.Value, + ) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const llvm_usize = try o.lowerType(Type.usize); - const usize_alignment = Type.usize.abiSize(mod); + const usize_alignment = Builder.Alignment.fromByteUnits(Type.usize.abiAlignment(mod)); - const array_llvm_ty = (try o.builder.arrayType(6, llvm_usize)).toLlvm(&o.builder); - const array_ptr = fg.valgrind_client_request_array orelse a: { - const array_ptr = try fg.buildAlloca(array_llvm_ty, @intCast(usize_alignment)); + const array_llvm_ty = try o.builder.arrayType(6, llvm_usize); + const array_ptr = if (fg.valgrind_client_request_array == .none) a: { + const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment); fg.valgrind_client_request_array = array_ptr; break :a array_ptr; - }; - const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 }; - const zero = (try o.builder.intConst(llvm_usize, 0)).toLlvm(&o.builder); + } else fg.valgrind_client_request_array; + const array_elements = [_]Builder.Value{ request, a1, a2, a3, a4, a5 }; + const zero = try o.builder.intValue(llvm_usize, 0); for (array_elements, 0..) 
|elem, i| { - const indexes = [_]*llvm.Value{ - zero, (try o.builder.intConst(llvm_usize, i)).toLlvm(&o.builder), - }; - const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, ""); - const store_inst = fg.builder.buildStore(elem, elem_ptr); - store_inst.setAlignment(@intCast(usize_alignment)); + const elem_ptr = try fg.wip.gep(.inbounds, array_llvm_ty, array_ptr, &.{ + zero, try o.builder.intValue(llvm_usize, i), + }, ""); + _ = try fg.wip.store(.normal, elem, elem_ptr, usize_alignment); } const arch_specific: struct { @@ -10533,8 +10600,8 @@ pub const FuncGen = struct { }; const fn_llvm_ty = (try o.builder.fnType(llvm_usize, &(.{llvm_usize} ** 2), .normal)).toLlvm(&o.builder); - const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, llvm_usize.toLlvm(&o.builder), ""); - const args = [_]*llvm.Value{ array_ptr_as_usize, default_value }; + const array_ptr_as_usize = try fg.wip.cast(.ptrtoint, array_ptr, llvm_usize, ""); + const args = [_]*llvm.Value{ array_ptr_as_usize.toLlvm(&fg.wip), default_value.toLlvm(&fg.wip) }; const asm_fn = llvm.getInlineAsm( fn_llvm_ty, arch_specific.template.ptr, @@ -10547,14 +10614,9 @@ pub const FuncGen = struct { .False, // can throw ); - const call = fg.builder.buildCall( - fn_llvm_ty, - asm_fn, - &args, - args.len, - .C, - .Auto, - "", + const call = (try fg.wip.unimplemented(llvm_usize, "")).finish( + fg.builder.buildCall(fn_llvm_ty, asm_fn, &args, args.len, .C, .Auto, ""), + &fg.wip, ); return call; } @@ -10764,14 +10826,14 @@ fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void { } } -fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrdering { +fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering { return switch (atomic_order) { - .Unordered => .Unordered, - .Monotonic => .Monotonic, - .Acquire => .Acquire, - .Release => .Release, - .AcqRel => .AcquireRelease, - .SeqCst => .SequentiallyConsistent, + .Unordered => .unordered, + .Monotonic => .monotonic, + .Acquire => .acquire, + .Release => .release, + .AcqRel => .acq_rel, + .SeqCst => .seq_cst, }; } @@ -11718,12 +11780,40 @@ fn compilerRtIntBits(bits: u16) u16 { return bits; } +fn buildAllocaInner( + wip: *Builder.WipFunction, + di_scope_non_null: bool, + llvm_ty: Builder.Type, + alignment: Builder.Alignment, + target: std.Target, +) Allocator.Error!Builder.Value { + const address_space = llvmAllocaAddressSpace(target); + + const alloca = blk: { + const prev_cursor = wip.cursor; + const prev_debug_location = wip.llvm.builder.getCurrentDebugLocation2(); + defer { + wip.cursor = prev_cursor; + if (wip.cursor.block == .entry) wip.cursor.instruction += 1; + if (di_scope_non_null) wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location); + } + + wip.cursor = .{ .block = .entry }; + wip.llvm.builder.clearCurrentDebugLocation(); + break :blk try wip.alloca(.normal, llvm_ty, .none, alignment, address_space, ""); + }; + + // The pointer returned from this function should have the generic address space, + // if this isn't the case then cast it to the generic address space. 
+ return wip.conv(.unneeded, alloca, .ptr, ""); +} + fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 { - return @intFromBool(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod)); + return @intFromBool(Type.err_int.abiAlignment(mod) > payload_ty.abiAlignment(mod)); } fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 { - return @intFromBool(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); + return @intFromBool(Type.err_int.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); } /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index a7aeea1a45..67bafc0113 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -43,6 +43,8 @@ constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb), pub const expected_fields_len = 32; pub const expected_gep_indices_len = 8; +pub const expected_cases_len = 8; +pub const expected_incoming_len = 8; pub const Options = struct { allocator: Allocator, @@ -186,6 +188,7 @@ pub const Type = enum(u32) { pub const Function = struct { ret: Type, params_len: u32, + //params: [params_len]Value, pub const Kind = enum { normal, vararg }; }; @@ -194,12 +197,18 @@ pub const Type = enum(u32) { name: String, types_len: u32, ints_len: u32, + //types: [types_len]Type, + //ints: [ints_len]u32, }; pub const Vector = extern struct { len: u32, child: Type, + fn length(self: Vector) u32 { + return self.len; + } + pub const Kind = enum { normal, scalable }; }; @@ -208,13 +217,14 @@ pub const Type = enum(u32) { len_hi: u32, child: Type, - fn len(self: Array) u64 { + fn length(self: Array) u64 { return @as(u64, self.len_hi) << 32 | self.len_lo; } }; pub const Structure = struct { fields_len: u32, + //fields: [fields_len]Type, pub const Kind = enum { normal, @"packed" }; }; @@ -295,6 +305,29 @@ pub const Type = enum(u32) { }; } + pub fn functionParameters(self: Type, builder: *const Builder) []const Type { + const item = builder.type_items.items[@intFromEnum(self)]; + switch (item.tag) { + .function, + .vararg_function, + => { + const extra = builder.typeExtraDataTrail(Type.Function, item.data); + return @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.params_len]); + }, + else => unreachable, + } + } + + pub fn functionReturn(self: Type, builder: *const Builder) Type { + const item = builder.type_items.items[@intFromEnum(self)]; + switch (item.tag) { + .function, + .vararg_function, + => return builder.typeExtraData(Type.Function, item.data).ret, + else => unreachable, + } + } + pub fn isVector(self: Type, builder: *const Builder) bool { return switch (self.tag(builder)) { .vector, .scalable_vector => true, @@ -325,6 +358,13 @@ pub const Type = enum(u32) { }; } + pub fn isAggregate(self: Type, builder: *const Builder) bool { + return switch (self.tag(builder)) { + .small_array, .array, .structure, .packed_structure, .named_structure => true, + else => false, + }; + } + pub fn scalarBits(self: Type, builder: *const Builder) u24 { return switch (self) { .void, .label, .token, .metadata, .none, .x86_amx => unreachable, @@ -388,6 +428,33 @@ pub const Type = enum(u32) { }; } + pub fn changeScalar(self: Type, scalar: Type, builder: *Builder) Allocator.Error!Type { + try builder.ensureUnusedTypeCapacity(1, Type.Vector, 0); + return self.changeScalarAssumeCapacity(scalar, builder); + } + + pub fn changeScalarAssumeCapacity(self: Type, scalar: Type, builder: *Builder) Type { + if (self.isFloatingPoint()) 
return scalar; + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .integer, + .pointer, + => scalar, + inline .vector, + .scalable_vector, + => |kind| builder.vectorTypeAssumeCapacity( + switch (kind) { + .vector => .normal, + .scalable_vector => .scalable, + else => unreachable, + }, + builder.typeExtraData(Type.Vector, item.data).len, + scalar, + ), + else => unreachable, + }; + } + pub fn vectorLen(self: Type, builder: *const Builder) u32 { const item = builder.type_items.items[@intFromEnum(self)]; return switch (item.tag) { @@ -398,6 +465,37 @@ pub const Type = enum(u32) { }; } + pub fn changeLength(self: Type, len: u32, builder: *Builder) Allocator.Error!Type { + try builder.ensureUnusedTypeCapacity(1, Type.Array, 0); + return self.changeLengthAssumeCapacity(len, builder); + } + + pub fn changeLengthAssumeCapacity(self: Type, len: u32, builder: *Builder) Type { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + inline .vector, + .scalable_vector, + => |kind| builder.vectorTypeAssumeCapacity( + switch (kind) { + .vector => .normal, + .scalable_vector => .scalable, + else => unreachable, + }, + len, + builder.typeExtraData(Type.Vector, item.data).child, + ), + .small_array => builder.arrayTypeAssumeCapacity( + len, + builder.typeExtraData(Type.Vector, item.data).child, + ), + .array => builder.arrayTypeAssumeCapacity( + len, + builder.typeExtraData(Type.Array, item.data).child, + ), + else => unreachable, + }; + } + pub fn aggregateLen(self: Type, builder: *const Builder) u64 { const item = builder.type_items.items[@intFromEnum(self)]; return switch (item.tag) { @@ -405,7 +503,7 @@ pub const Type = enum(u32) { .scalable_vector, .small_array, => builder.typeExtraData(Type.Vector, item.data).len, - .array => builder.typeExtraData(Type.Array, item.data).len(), + .array => builder.typeExtraData(Type.Array, item.data).length(), .structure, .packed_structure, => builder.typeExtraData(Type.Structure, item.data).fields_len, @@ -430,7 +528,40 @@ pub const Type = enum(u32) { } } - pub const FormatData = struct { + pub fn childTypeAt(self: Type, indices: []const u32, builder: *const Builder) Type { + if (indices.len == 0) return self; + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .small_array => builder.typeExtraData(Type.Vector, item.data).child + .childTypeAt(indices[1..], builder), + .array => builder.typeExtraData(Type.Array, item.data).child + .childTypeAt(indices[1..], builder), + .structure, + .packed_structure, + => { + const extra = builder.typeExtraDataTrail(Type.Structure, item.data); + const fields: []const Type = + @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + return fields[indices[0]].childTypeAt(indices[1..], builder); + }, + .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body + .childTypeAt(indices, builder), + else => unreachable, + }; + } + + pub fn targetLayoutType(self: Type, builder: *const Builder) Type { + _ = self; + _ = builder; + @panic("TODO: implement targetLayoutType"); + } + + pub fn isSized(self: Type, builder: *const Builder) Allocator.Error!bool { + var visited: IsSizedVisited = .{}; + return self.isSizedVisited(&visited, builder); + } + + const FormatData = struct { type: Type, builder: *const Builder, }; @@ -441,11 +572,90 @@ pub const Type = enum(u32) { writer: anytype, ) @TypeOf(writer).Error!void { assert(data.type != .none); + if (comptime std.mem.eql(u8, fmt_str, "m")) { + 
const item = data.builder.type_items.items[@intFromEnum(data.type)]; + switch (item.tag) { + .simple => try writer.writeAll(switch (@as(Simple, @enumFromInt(item.data))) { + .void => "isVoid", + .half => "f16", + .bfloat => "bf16", + .float => "f32", + .double => "f64", + .fp128 => "f128", + .x86_fp80 => "f80", + .ppc_fp128 => "ppcf128", + .x86_amx => "x86amx", + .x86_mmx => "x86mmx", + .label, .token => unreachable, + .metadata => "Metadata", + }), + .function, .vararg_function => |kind| { + const extra = data.builder.typeExtraDataTrail(Type.Function, item.data); + const params: []const Type = + @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]); + try writer.print("f_{m}", .{extra.data.ret.fmt(data.builder)}); + for (params) |param| try writer.print("{m}", .{param.fmt(data.builder)}); + switch (kind) { + .function => {}, + .vararg_function => try writer.writeAll("vararg"), + else => unreachable, + } + try writer.writeByte('f'); + }, + .integer => try writer.print("i{d}", .{item.data}), + .pointer => try writer.print("p{d}", .{item.data}), + .target => { + const extra = data.builder.typeExtraDataTrail(Type.Target, item.data); + const types: []const Type = + @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.types_len]); + const ints: []const u32 = @ptrCast(data.builder.type_extra.items[extra.end + + extra.data.types_len ..][0..extra.data.ints_len]); + try writer.print("t{s}", .{extra.data.name.toSlice(data.builder).?}); + for (types) |ty| try writer.print("_{m}", .{ty.fmt(data.builder)}); + for (ints) |int| try writer.print("_{d}", .{int}); + try writer.writeByte('t'); + }, + .vector, .scalable_vector => |kind| { + const extra = data.builder.typeExtraData(Type.Vector, item.data); + try writer.print("{s}v{d}{m}", .{ + switch (kind) { + .vector => "", + .scalable_vector => "nx", + else => unreachable, + }, + extra.len, + extra.child.fmt(data.builder), + }); + }, + inline .small_array, .array => |kind| { + const extra = data.builder.typeExtraData(switch (kind) { + .small_array => Type.Vector, + .array => Type.Array, + else => unreachable, + }, item.data); + try writer.print("a{d}{m}", .{ extra.length(), extra.child.fmt(data.builder) }); + }, + .structure, .packed_structure => { + const extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); + const fields: []const Type = + @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + try writer.writeAll("sl_"); + for (fields) |field| try writer.print("{m}", .{field.fmt(data.builder)}); + try writer.writeByte('s'); + }, + .named_structure => { + const extra = data.builder.typeExtraData(Type.NamedStructure, item.data); + try writer.writeAll("s_"); + if (extra.id.toSlice(data.builder)) |id| try writer.writeAll(id); + }, + } + return; + } if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name); const item = data.builder.type_items.items[@intFromEnum(data.type)]; switch (item.tag) { .simple => unreachable, - .function, .vararg_function => { + .function, .vararg_function => |kind| { const extra = data.builder.typeExtraDataTrail(Type.Function, item.data); const params: []const Type = @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]); @@ -457,7 +667,7 @@ pub const Type = enum(u32) { if (index > 0) try writer.writeAll(", "); try writer.print("{%}", .{param.fmt(data.builder)}); } - switch (item.tag) { + switch (kind) { .function => {}, .vararg_function => { if (params.len > 0) try writer.writeAll(", "); @@ -483,29 +693,31 @@ pub 
const Type = enum(u32) { for (ints) |int| try writer.print(", {d}", .{int}); + try writer.writeByte(')'); + }, - .vector => { + .vector, .scalable_vector => |kind| { const extra = data.builder.typeExtraData(Type.Vector, item.data); - try writer.print("<{d} x {%}>", .{ extra.len, extra.child.fmt(data.builder) }); - }, - .scalable_vector => { - const extra = data.builder.typeExtraData(Type.Vector, item.data); - try writer.print("<vscale x {d} x {%}>", .{ extra.len, extra.child.fmt(data.builder) }); - }, - .small_array => { - const extra = data.builder.typeExtraData(Type.Vector, item.data); - try writer.print("[{d} x {%}]", .{ extra.len, extra.child.fmt(data.builder) }); + try writer.print("<{s}{d} x {%}>", .{ + switch (kind) { + .vector => "", + .scalable_vector => "vscale x ", + else => unreachable, + }, + extra.len, + extra.child.fmt(data.builder), + }); }, - .array => { - const extra = data.builder.typeExtraData(Type.Array, item.data); - try writer.print("[{d} x {%}]", .{ extra.len(), extra.child.fmt(data.builder) }); + inline .small_array, .array => |kind| { + const extra = data.builder.typeExtraData(switch (kind) { + .small_array => Type.Vector, + .array => Type.Array, + else => unreachable, + }, item.data); + try writer.print("[{d} x {%}]", .{ extra.length(), extra.child.fmt(data.builder) }); }, - .structure, - .packed_structure, - => { + .structure, .packed_structure => |kind| { const extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); const fields: []const Type = @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]); - switch (item.tag) { + switch (kind) { .structure => {}, .packed_structure => try writer.writeByte('<'), else => unreachable, @@ -516,7 +728,7 @@ pub const Type = enum(u32) { try writer.print("{%}", .{field.fmt(data.builder)}); } try writer.writeAll(" }"); - switch (item.tag) { + switch (kind) { .structure => {}, .packed_structure => try writer.writeByte('>'), else => unreachable, @@ -544,6 +756,82 @@ pub const Type = enum(u32) { assert(builder.useLibLlvm()); return builder.llvm.types.items[@intFromEnum(self)]; } + + const IsSizedVisited = std.AutoHashMapUnmanaged(Type, void); + fn isSizedVisited( + self: Type, + visited: *IsSizedVisited, + builder: *const Builder, + ) Allocator.Error!bool { + return switch (self) { + .void, + .label, + .token, + .metadata, + => false, + .half, + .bfloat, + .float, + .double, + .fp128, + .x86_fp80, + .ppc_fp128, + .x86_amx, + .x86_mmx, + .i1, + .i8, + .i16, + .i29, + .i32, + .i64, + .i80, + .i128, + .ptr, + => true, + .none => unreachable, + _ => { + const item = builder.type_items.items[@intFromEnum(self)]; + return switch (item.tag) { + .simple => unreachable, + .function, + .vararg_function, + => false, + .integer, + .pointer, + => true, + .target => self.targetLayoutType(builder).isSizedVisited(visited, builder), + .vector, + .scalable_vector, + .small_array, + => builder.typeExtraData(Type.Vector, item.data) + .child.isSizedVisited(visited, builder), + .array => builder.typeExtraData(Type.Array, item.data) + .child.isSizedVisited(visited, builder), + .structure, + .packed_structure, + => { + if (try visited.fetchPut(builder.gpa, self, {})) |_| return false; + + const extra = builder.typeExtraDataTrail(Type.Structure, item.data); + const fields: []const Type = @ptrCast( + builder.type_extra.items[extra.end..][0..extra.data.fields_len], + ); + for (fields) |field| { + if (field.isVector(builder) and field.vectorKind(builder) == .scalable) + return false; + if (!try field.isSizedVisited(visited, builder)) + return
false; + } + return true; + }, + .named_structure => { + const body = builder.typeExtraData(Type.NamedStructure, item.data).body; + return body != .none and try body.isSizedVisited(visited, builder); + }, + }; + }, + }; + } }; pub const Linkage = enum { @@ -727,11 +1015,11 @@ pub const AddrSpace = enum(u24) { pub fn format( self: AddrSpace, - comptime _: []const u8, + comptime prefix: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self != .default) try writer.print(" addrspace({d})", .{@intFromEnum(self)}); + if (self != .default) try writer.print("{s} addrspace({d})", .{ prefix, @intFromEnum(self) }); } }; @@ -785,9 +1073,7 @@ pub const Global = struct { addr_space: AddrSpace = .default, externally_initialized: ExternallyInitialized = .default, type: Type, - section: String = .none, partition: String = .none, - alignment: Alignment = .default, kind: union(enum) { alias: Alias.Index, variable: Variable.Index, @@ -824,6 +1110,10 @@ pub const Global = struct { return &builder.globals.values()[@intFromEnum(self.unwrap(builder))]; } + pub fn typeOf(self: Index, builder: *const Builder) Type { + return self.ptrConst(builder).type; + } + pub fn toConst(self: Index) Constant { return @enumFromInt(@intFromEnum(Constant.first_global) + @intFromEnum(self)); } @@ -943,11 +1233,19 @@ pub const Global = struct { pub const Alias = struct { global: Global.Index, + thread_local: ThreadLocal = .default, + init: Constant = .no_init, pub const Index = enum(u32) { none = std.math.maxInt(u32), _, + pub fn getAliasee(self: Index, builder: *const Builder) Global.Index { + const aliasee = self.ptrConst(builder).init.getBase(builder); + assert(aliasee != .none); + return aliasee; + } + pub fn ptr(self: Index, builder: *Builder) *Alias { return &builder.aliases.items[@intFromEnum(self)]; } @@ -956,6 +1254,18 @@ pub const Alias = struct { return &builder.aliases.items[@intFromEnum(self)]; } + pub fn typeOf(self: Index, builder: *const Builder) Type { + return self.ptrConst(builder).global.typeOf(builder); + } + + pub fn toConst(self: Index, builder: *const Builder) Constant { + return self.ptrConst(builder).global.toConst(); + } + + pub fn toValue(self: Index, builder: *const Builder) Value { + return self.toConst(builder).toValue(); + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -967,6 +1277,8 @@ pub const Variable = struct { thread_local: ThreadLocal = .default, mutability: enum { global, constant } = .global, init: Constant = .no_init, + section: String = .none, + alignment: Alignment = .default, pub const Index = enum(u32) { none = std.math.maxInt(u32), @@ -980,6 +1292,18 @@ pub const Variable = struct { return &builder.variables.items[@intFromEnum(self)]; } + pub fn typeOf(self: Index, builder: *const Builder) Type { + return self.ptrConst(builder).global.typeOf(builder); + } + + pub fn toConst(self: Index, builder: *const Builder) Constant { + return self.ptrConst(builder).global.toConst(); + } + + pub fn toValue(self: Index, builder: *const Builder) Value { + return self.toConst(builder).toValue(); + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -988,9 +1312,11 @@ pub const Variable = struct { pub const Function = struct { global: Global.Index, + section: String = .none, + alignment: Alignment = .default, blocks: []const Block = &.{}, instructions: std.MultiArrayList(Instruction) = .{}, - names: 
?[*]const String = null, + names: [*]const String = &[0]String{}, metadata: ?[*]const Metadata = null, extra: []const u32 = &.{}, @@ -1006,6 +1332,18 @@ pub const Function = struct { return &builder.functions.items[@intFromEnum(self)]; } + pub fn typeOf(self: Index, builder: *const Builder) Type { + return self.ptrConst(builder).global.typeOf(builder); + } + + pub fn toConst(self: Index, builder: *const Builder) Constant { + return self.ptrConst(builder).global.toConst(); + } + + pub fn toValue(self: Index, builder: *const Builder) Value { + return self.toConst(builder).toValue(); + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -1021,82 +1359,843 @@ pub const Function = struct { tag: Tag, data: u32, - pub const Tag = enum { + pub const Tag = enum(u8) { + add, + @"add nsw", + @"add nuw", + @"add nuw nsw", + addrspacecast, + alloca, + @"alloca inalloca", + @"and", arg, + ashr, + @"ashr exact", + bitcast, block, - @"ret void", + br, + br_cond, + extractelement, + extractvalue, + fadd, + @"fadd fast", + @"fcmp false", + @"fcmp fast false", + @"fcmp fast oeq", + @"fcmp fast oge", + @"fcmp fast ogt", + @"fcmp fast ole", + @"fcmp fast olt", + @"fcmp fast one", + @"fcmp fast ord", + @"fcmp fast true", + @"fcmp fast ueq", + @"fcmp fast uge", + @"fcmp fast ugt", + @"fcmp fast ule", + @"fcmp fast ult", + @"fcmp fast une", + @"fcmp fast uno", + @"fcmp oeq", + @"fcmp oge", + @"fcmp ogt", + @"fcmp ole", + @"fcmp olt", + @"fcmp one", + @"fcmp ord", + @"fcmp true", + @"fcmp ueq", + @"fcmp uge", + @"fcmp ugt", + @"fcmp ule", + @"fcmp ult", + @"fcmp une", + @"fcmp uno", + fdiv, + @"fdiv fast", + fence, + fmul, + @"fmul fast", + fneg, + @"fneg fast", + fpext, + fptosi, + fptoui, + fptrunc, + frem, + @"frem fast", + fsub, + @"fsub fast", + getelementptr, + @"getelementptr inbounds", + @"icmp eq", + @"icmp ne", + @"icmp sge", + @"icmp sgt", + @"icmp sle", + @"icmp slt", + @"icmp uge", + @"icmp ugt", + @"icmp ule", + @"icmp ult", + insertelement, + insertvalue, + inttoptr, + @"llvm.maxnum.", + @"llvm.minnum.", + @"llvm.sadd.sat.", + @"llvm.smax.", + @"llvm.smin.", + @"llvm.smul.fix.sat.", + @"llvm.sshl.sat.", + @"llvm.ssub.sat.", + @"llvm.uadd.sat.", + @"llvm.umax.", + @"llvm.umin.", + @"llvm.umul.fix.sat.", + @"llvm.ushl.sat.", + @"llvm.usub.sat.", + load, + @"load atomic", + @"load atomic volatile", + @"load volatile", + lshr, + @"lshr exact", + mul, + @"mul nsw", + @"mul nuw", + @"mul nuw nsw", + @"or", + phi, + @"phi fast", + ptrtoint, ret, + @"ret void", + sdiv, + @"sdiv exact", + select, + @"select fast", + sext, + shl, + @"shl nsw", + @"shl nuw", + @"shl nuw nsw", + shufflevector, + sitofp, + srem, + store, + @"store atomic", + @"store atomic volatile", + @"store volatile", + sub, + @"sub nsw", + @"sub nuw", + @"sub nuw nsw", + @"switch", + trunc, + udiv, + @"udiv exact", + urem, + uitofp, + unimplemented, + @"unreachable", + va_arg, + xor, + zext, }; pub const Index = enum(u32) { + none = std.math.maxInt(u31), _, pub fn name(self: Instruction.Index, function: *const Function) String { - return if (function.names) |names| - names[@intFromEnum(self)] - else - @enumFromInt(@intFromEnum(self)); + return function.names[@intFromEnum(self)]; } - }; - }; - - pub fn deinit(self: *Function, gpa: Allocator) void { - gpa.free(self.extra); - if (self.metadata) |metadata| gpa.free(metadata[0..self.instructions.len]); - if (self.names) |names| gpa.free(names[0..self.instructions.len]); - self.instructions.deinit(gpa); - self.* = 
undefined; - } -}; - -pub const WipFunction = struct { - builder: *Builder, - function: Function.Index, - llvm: if (build_options.have_llvm) struct { - builder: *llvm.Builder, - blocks: std.ArrayListUnmanaged(*llvm.BasicBlock), - instructions: std.ArrayListUnmanaged(*llvm.Value), - } else void, - cursor: Cursor, - blocks: std.ArrayListUnmanaged(Block), - instructions: std.MultiArrayList(Instruction), - names: std.ArrayListUnmanaged(String), - metadata: std.ArrayListUnmanaged(Metadata), - extra: std.ArrayListUnmanaged(u32), - - pub const Cursor = struct { block: Block.Index, instruction: u32 = 0 }; - pub const Block = struct { - name: String, - incoming: u32, - instructions: std.ArrayListUnmanaged(Instruction.Index), + pub fn toValue(self: Instruction.Index) Value { + return @enumFromInt(@intFromEnum(self)); + } - const Index = enum(u32) { - entry, - _, + pub fn isTerminatorWip(self: Instruction.Index, wip: *const WipFunction) bool { + return switch (wip.instructions.items(.tag)[@intFromEnum(self)]) { + .br, + .br_cond, + .ret, + .@"ret void", + .@"unreachable", + => true, + else => false, + }; + } - pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock { - assert(wip.builder.useLibLlvm()); - return wip.llvm.blocks.items[@intFromEnum(self)]; + pub fn hasResultWip(self: Instruction.Index, wip: *const WipFunction) bool { + return switch (wip.instructions.items(.tag)[@intFromEnum(self)]) { + .br, + .br_cond, + .fence, + .ret, + .@"ret void", + .store, + .@"store atomic", + .@"store atomic volatile", + .@"store volatile", + .@"unreachable", + => false, + else => true, + }; } - }; - }; - pub const Instruction = Function.Instruction; + pub fn typeOfWip(self: Instruction.Index, wip: *const WipFunction) Type { + const instruction = wip.instructions.get(@intFromEnum(self)); + return switch (instruction.tag) { + .add, + .@"add nsw", + .@"add nuw", + .@"add nuw nsw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"llvm.maxnum.", + .@"llvm.minnum.", + .@"llvm.sadd.sat.", + .@"llvm.smax.", + .@"llvm.smin.", + .@"llvm.smul.fix.sat.", + .@"llvm.sshl.sat.", + .@"llvm.ssub.sat.", + .@"llvm.uadd.sat.", + .@"llvm.umax.", + .@"llvm.umin.", + .@"llvm.umul.fix.sat.", + .@"llvm.ushl.sat.", + .@"llvm.usub.sat.", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"mul nuw nsw", + .@"or", + .sdiv, + .@"sdiv exact", + .shl, + .@"shl nsw", + .@"shl nuw", + .@"shl nuw nsw", + .srem, + .sub, + .@"sub nsw", + .@"sub nuw", + .@"sub nuw nsw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => wip.extraData(Binary, instruction.data).lhs.typeOfWip(wip), + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => wip.extraData(Cast, instruction.data).type, + .alloca, + .@"alloca inalloca", + => wip.builder.ptrTypeAssumeCapacity( + wip.extraData(Alloca, instruction.data).info.addr_space, + ), + .arg => wip.function.typeOf(wip.builder) + .functionParameters(wip.builder)[instruction.data], + .block => .label, + .br, + .br_cond, + .fence, + .ret, + .@"ret void", + .store, + .@"store atomic", + .@"store atomic volatile", + .@"store volatile", + .@"switch", + .@"unreachable", + => .none, + .extractelement => wip.extraData(ExtractElement, instruction.data) + .val.typeOfWip(wip).childType(wip.builder), + .extractvalue => { + const extra = wip.extraDataTrail(ExtractValue, 
instruction.data); + const indices: []const u32 = + wip.extra.items[extra.end..][0..extra.data.indices_len]; + return extra.data.val.typeOfWip(wip).childTypeAt(indices, wip.builder); + }, + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + => wip.extraData(Binary, instruction.data).lhs.typeOfWip(wip) + .changeScalarAssumeCapacity(.i1, wip.builder), + .fneg, + .@"fneg fast", + => @as(Value, @enumFromInt(instruction.data)).typeOfWip(wip), + .getelementptr, + .@"getelementptr inbounds", + => { + const extra = wip.extraDataTrail(GetElementPtr, instruction.data); + const indices: []const Value = + @ptrCast(wip.extra.items[extra.end..][0..extra.data.indices_len]); + const base_ty = extra.data.base.typeOfWip(wip); + if (!base_ty.isVector(wip.builder)) for (indices) |index| { + const index_ty = index.typeOfWip(wip); + if (!index_ty.isVector(wip.builder)) continue; + return index_ty.changeScalarAssumeCapacity(base_ty, wip.builder); + }; + return base_ty; + }, + .insertelement => wip.extraData(InsertElement, instruction.data).val.typeOfWip(wip), + .insertvalue => wip.extraData(InsertValue, instruction.data).val.typeOfWip(wip), + .load, + .@"load atomic", + .@"load atomic volatile", + .@"load volatile", + => wip.extraData(Load, instruction.data).type, + .phi, + .@"phi fast", + => wip.extraData(WipPhi, instruction.data).type, + .select, + .@"select fast", + => wip.extraData(Select, instruction.data).lhs.typeOfWip(wip), + .shufflevector => { + const extra = wip.extraData(ShuffleVector, instruction.data); + return extra.lhs.typeOfWip(wip).changeLengthAssumeCapacity( + extra.mask.typeOfWip(wip).vectorLen(wip.builder), + wip.builder, + ); + }, + .unimplemented => @enumFromInt(instruction.data), + .va_arg => wip.extraData(VaArg, instruction.data).type, + }; + } - pub fn init(builder: *Builder, function: Function.Index) WipFunction { - if (builder.useLibLlvm()) { - const llvm_function = function.toLlvm(builder); - while (llvm_function.getFirstBasicBlock()) |bb| bb.deleteBasicBlock(); - } - return .{ - .builder = builder, - .function = function, - .llvm = if (builder.useLibLlvm()) .{ - .builder = builder.llvm.context.createBuilder(), - .blocks = .{}, - .instructions = .{}, - } else undefined, + pub fn typeOf( + self: Instruction.Index, + function_index: Function.Index, + builder: *Builder, + ) Type { + const function = function_index.ptrConst(builder); + const instruction = function.instructions.get(@intFromEnum(self)); + return switch (instruction.tag) { + .add, + .@"add nsw", + .@"add nuw", + .@"add nuw nsw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"llvm.maxnum.", + .@"llvm.minnum.", + .@"llvm.sadd.sat.", + .@"llvm.smax.", + .@"llvm.smin.", + .@"llvm.smul.fix.sat.", + 
.@"llvm.sshl.sat.", + .@"llvm.ssub.sat.", + .@"llvm.uadd.sat.", + .@"llvm.umax.", + .@"llvm.umin.", + .@"llvm.umul.fix.sat.", + .@"llvm.ushl.sat.", + .@"llvm.usub.sat.", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"mul nuw nsw", + .@"or", + .sdiv, + .@"sdiv exact", + .shl, + .@"shl nsw", + .@"shl nuw", + .@"shl nuw nsw", + .srem, + .sub, + .@"sub nsw", + .@"sub nuw", + .@"sub nuw nsw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => function.extraData(Binary, instruction.data).lhs.typeOf(function_index, builder), + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => function.extraData(Cast, instruction.data).type, + .alloca, + .@"alloca inalloca", + => builder.ptrTypeAssumeCapacity( + function.extraData(Alloca, instruction.data).info.addr_space, + ), + .arg => function.global.typeOf(builder) + .functionParameters(builder)[instruction.data], + .block => .label, + .br, + .br_cond, + .fence, + .ret, + .@"ret void", + .store, + .@"store atomic", + .@"store atomic volatile", + .@"store volatile", + .@"switch", + .@"unreachable", + => .none, + .extractelement => function.extraData(ExtractElement, instruction.data) + .val.typeOf(function_index, builder).childType(builder), + .extractvalue => { + const extra = function.extraDataTrail(ExtractValue, instruction.data); + const indices: []const u32 = + function.extra[extra.end..][0..extra.data.indices_len]; + return extra.data.val.typeOf(function_index, builder) + .childTypeAt(indices, builder); + }, + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + => function.extraData(Binary, instruction.data).lhs.typeOf(function_index, builder) + .changeScalarAssumeCapacity(.i1, builder), + .fneg, + .@"fneg fast", + => @as(Value, @enumFromInt(instruction.data)).typeOf(function_index, builder), + .getelementptr, + .@"getelementptr inbounds", + => { + const extra = function.extraDataTrail(GetElementPtr, instruction.data); + const indices: []const Value = + @ptrCast(function.extra[extra.end..][0..extra.data.indices_len]); + const base_ty = extra.data.base.typeOf(function_index, builder); + if (!base_ty.isVector(builder)) for (indices) |index| { + const index_ty = index.typeOf(function_index, builder); + if (!index_ty.isVector(builder)) continue; + return index_ty.changeScalarAssumeCapacity(base_ty, builder); + }; + return base_ty; + }, + .insertelement => function.extraData(InsertElement, instruction.data) + .val.typeOf(function_index, builder), + .insertvalue => function.extraData(InsertValue, instruction.data) + .val.typeOf(function_index, builder), + .load, + .@"load atomic", + .@"load atomic volatile", + .@"load volatile", + => function.extraData(Load, instruction.data).type, + .phi, + .@"phi fast", + => { + const extra = 
function.extraDataTrail(Phi, instruction.data); + const incoming_vals: []const Value = + @ptrCast(function.extra[extra.end..][0..extra.data.incoming_len]); + return incoming_vals[0].typeOf(function_index, builder); + }, + .select, + .@"select fast", + => function.extraData(Select, instruction.data).lhs.typeOf(function_index, builder), + .shufflevector => { + const extra = function.extraData(ShuffleVector, instruction.data); + return extra.lhs.typeOf(function_index, builder).changeLengthAssumeCapacity( + extra.mask.typeOf(function_index, builder).vectorLen(builder), + builder, + ); + }, + .unimplemented => @enumFromInt(instruction.data), + .va_arg => function.extraData(VaArg, instruction.data).type, + }; + } + + const FormatData = struct { + instruction: Instruction.Index, + function: Function.Index, + builder: *Builder, + }; + fn format( + data: FormatData, + comptime fmt_str: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_| + @compileError("invalid format string: '" ++ fmt_str ++ "'"); + if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) { + if (data.instruction == .none) return; + try writer.writeByte(','); + } + if (comptime std.mem.indexOfScalar(u8, fmt_str, ' ') != null) { + if (data.instruction == .none) return; + try writer.writeByte(' '); + } + if (comptime std.mem.indexOfScalar(u8, fmt_str, '%') != null) try writer.print( + "{%} ", + .{data.instruction.typeOf(data.function, data.builder).fmt(data.builder)}, + ); + assert(data.instruction != .none); + try writer.print("%{}", .{ + data.instruction.name(data.function.ptrConst(data.builder)).fmt(data.builder), + }); + } + pub fn fmt( + self: Instruction.Index, + function: Function.Index, + builder: *Builder, + ) std.fmt.Formatter(format) { + return .{ .data = .{ .instruction = self, .function = function, .builder = builder } }; + } + + pub fn toLlvm(self: Instruction.Index, wip: *const WipFunction) *llvm.Value { + assert(wip.builder.useLibLlvm()); + return wip.llvm.instructions.items[@intFromEnum(self)]; + } + + fn llvmName(self: Instruction.Index, wip: *const WipFunction) [*:0]const u8 { + return if (wip.builder.strip) + "" + else + wip.names.items[@intFromEnum(self)].toSlice(wip.builder).?; + } + }; + + pub const ExtraIndex = u32; + + pub const BrCond = struct { + cond: Value, + then: Block.Index, + @"else": Block.Index, + }; + + pub const Switch = struct { + val: Value, + default: Block.Index, + cases_len: u32, + //case_vals: [cases_len]Constant, + //case_blocks: [cases_len]Block.Index, + }; + + pub const Binary = struct { + lhs: Value, + rhs: Value, + }; + + pub const ExtractElement = struct { + val: Value, + index: Value, + }; + + pub const InsertElement = struct { + val: Value, + elem: Value, + index: Value, + }; + + pub const ShuffleVector = struct { + lhs: Value, + rhs: Value, + mask: Value, + }; + + pub const ExtractValue = struct { + val: Value, + indices_len: u32, + //indices: [indices_len]u32, + }; + + pub const InsertValue = struct { + val: Value, + elem: Value, + indices_len: u32, + //indices: [indices_len]u32, + }; + + pub const Alloca = struct { + type: Type, + len: Value, + info: Info, + + pub const Kind = enum { normal, inalloca }; + pub const Info = packed struct(u32) { + alignment: Alignment, + addr_space: AddrSpace, + _: u2 = undefined, + }; + }; + + pub const Load = struct { + type: Type, + ptr: Value, + info: MemoryAccessInfo, + }; + + pub const Store = struct { + val: Value, + ptr: Value, + info: 
MemoryAccessInfo, + }; + + pub const GetElementPtr = struct { + type: Type, + base: Value, + indices_len: u32, + //indices: [indices_len]Value, + + pub const Kind = Constant.GetElementPtr.Kind; + }; + + pub const Cast = struct { + val: Value, + type: Type, + + pub const Signedness = Constant.Cast.Signedness; + }; + + pub const WipPhi = struct { + type: Type, + //incoming_vals: [block.incoming]Value, + //incoming_blocks: [block.incoming]Block.Index, + }; + + pub const Phi = struct { + incoming_len: u32, + //incoming_vals: [incoming_len]Value, + //incoming_blocks: [incoming_len]Block.Index, + }; + + pub const Select = struct { + cond: Value, + lhs: Value, + rhs: Value, + }; + + pub const VaArg = struct { + list: Value, + type: Type, + }; + }; + + pub fn deinit(self: *Function, gpa: Allocator) void { + gpa.free(self.extra); + if (self.metadata) |metadata| gpa.free(metadata[0..self.instructions.len]); + gpa.free(self.names[0..self.instructions.len]); + self.instructions.deinit(gpa); + self.* = undefined; + } + + pub fn arg(self: *const Function, index: u32) Value { + const argument = self.instructions.get(index); + assert(argument.tag == .arg); + assert(argument.data == index); + + const argument_index: Instruction.Index = @enumFromInt(index); + return argument_index.toValue(); + } + + fn extraDataTrail( + self: *const Function, + comptime T: type, + index: Instruction.ExtraIndex, + ) struct { data: T, end: Instruction.ExtraIndex } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, self.extra[index..][0..fields.len]) |field, value| + @field(result, field.name) = switch (field.type) { + u32 => value, + Alignment, AtomicOrdering, Block.Index, Type, Value => @enumFromInt(value), + MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; + } + + fn extraData(self: *const Function, comptime T: type, index: Instruction.ExtraIndex) T { + return self.extraDataTrail(T, index).data; + } +}; + +pub const WipFunction = struct { + builder: *Builder, + function: Function.Index, + llvm: if (build_options.have_llvm) struct { + builder: *llvm.Builder, + blocks: std.ArrayListUnmanaged(*llvm.BasicBlock), + instructions: std.ArrayListUnmanaged(*llvm.Value), + } else void, + cursor: Cursor, + blocks: std.ArrayListUnmanaged(Block), + instructions: std.MultiArrayList(Instruction), + names: std.ArrayListUnmanaged(String), + metadata: std.ArrayListUnmanaged(Metadata), + extra: std.ArrayListUnmanaged(u32), + + pub const Cursor = struct { block: Block.Index, instruction: u32 = 0 }; + + pub const Block = struct { + name: String, + incoming: u32, + branches: u32 = 0, + instructions: std.ArrayListUnmanaged(Instruction.Index), + + const Index = enum(u32) { + entry, + _, + + pub fn ptr(self: Index, wip: *WipFunction) *Block { + return &wip.blocks.items[@intFromEnum(self)]; + } + + pub fn ptrConst(self: Index, wip: *const WipFunction) *const Block { + return &wip.blocks.items[@intFromEnum(self)]; + } + + pub fn toInst(self: Index, function: *const Function) Instruction.Index { + return function.blocks[@intFromEnum(self)].instruction; + } + + pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock { + assert(wip.builder.useLibLlvm()); + return wip.llvm.blocks.items[@intFromEnum(self)]; + } + }; + }; + + pub const Instruction = Function.Instruction; + + pub fn init(builder: *Builder, function: 
Function.Index) Allocator.Error!WipFunction { + if (builder.useLibLlvm()) { + const llvm_function = function.toLlvm(builder); + while (llvm_function.getFirstBasicBlock()) |bb| bb.deleteBasicBlock(); + } + + var self = WipFunction{ + .builder = builder, + .function = function, + .llvm = if (builder.useLibLlvm()) .{ + .builder = builder.llvm.context.createBuilder(), + .blocks = .{}, + .instructions = .{}, + } else undefined, .cursor = undefined, .blocks = .{}, .instructions = .{}, @@ -1104,102 +2203,1447 @@ pub const WipFunction = struct { .metadata = .{}, .extra = .{}, }; + errdefer self.deinit(); + + const params_len = function.typeOf(self.builder).functionParameters(self.builder).len; + try self.ensureUnusedExtraCapacity(params_len, NoExtra, 0); + try self.instructions.ensureUnusedCapacity(self.builder.gpa, params_len); + if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, params_len); + if (self.builder.useLibLlvm()) + try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, params_len); + for (0..params_len) |param_index| { + self.instructions.appendAssumeCapacity(.{ .tag = .arg, .data = @intCast(param_index) }); + if (!self.builder.strip) self.names.appendAssumeCapacity(.empty); // TODO: param names + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + function.toLlvm(self.builder).getParam(@intCast(param_index)), + ); + } + + return self; } - pub fn block(self: *WipFunction, name: []const u8) Allocator.Error!Block.Index { + pub fn arg(self: *const WipFunction, index: u32) Value { + const argument = self.instructions.get(index); + assert(argument.tag == .arg); + assert(argument.data == index); + + const argument_index: Instruction.Index = @enumFromInt(index); + return argument_index.toValue(); + } + + pub fn block(self: *WipFunction, incoming: u32, name: []const u8) Allocator.Error!Block.Index { try self.blocks.ensureUnusedCapacity(self.builder.gpa, 1); if (self.builder.useLibLlvm()) try self.llvm.blocks.ensureUnusedCapacity(self.builder.gpa, 1); - const index: Block.Index = @enumFromInt(self.blocks.items.len); - const final_name = if (self.builder.strip) .empty else try self.builder.string(name); - self.blocks.appendAssumeCapacity(.{ .name = final_name, .incoming = 0, .instructions = .{} }); - if (self.builder.useLibLlvm()) self.llvm.blocks.appendAssumeCapacity( - self.builder.llvm.context.appendBasicBlock( - self.function.toLlvm(self.builder), - final_name.toSlice(self.builder).?, + const index: Block.Index = @enumFromInt(self.blocks.items.len); + const final_name = if (self.builder.strip) .empty else try self.builder.string(name); + self.blocks.appendAssumeCapacity(.{ + .name = final_name, + .incoming = incoming, + .instructions = .{}, + }); + if (self.builder.useLibLlvm()) self.llvm.blocks.appendAssumeCapacity( + self.builder.llvm.context.appendBasicBlock( + self.function.toLlvm(self.builder), + final_name.toSlice(self.builder).?, + ), + ); + return index; + } + + pub fn ret(self: *WipFunction, val: Value) Allocator.Error!Instruction.Index { + assert(val.typeOfWip(self) == self.function.typeOf(self.builder).functionReturn(self.builder)); + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(null, .{ .tag = .ret, .data = @intFromEnum(val) }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildRet(val.toLlvm(self)), + ); + return instruction; + } + + pub fn retVoid(self: *WipFunction) Allocator.Error!Instruction.Index { + try 
self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(null, .{ .tag = .@"ret void", .data = undefined }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildRetVoid(), + ); + return instruction; + } + + pub fn br(self: *WipFunction, dest: Block.Index) Allocator.Error!Instruction.Index { + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(null, .{ .tag = .br, .data = @intFromEnum(dest) }); + dest.ptr(self).branches += 1; + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildBr(dest.toLlvm(self)), + ); + return instruction; + } + + pub fn brCond( + self: *WipFunction, + cond: Value, + then: Block.Index, + @"else": Block.Index, + ) Allocator.Error!Instruction.Index { + assert(cond.typeOfWip(self) == .i1); + try self.ensureUnusedExtraCapacity(1, Instruction.BrCond, 0); + const instruction = try self.addInst(null, .{ + .tag = .br_cond, + .data = self.addExtraAssumeCapacity(Instruction.BrCond{ + .cond = cond, + .then = then, + .@"else" = @"else", + }), + }); + then.ptr(self).branches += 1; + @"else".ptr(self).branches += 1; + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildCondBr(cond.toLlvm(self), then.toLlvm(self), @"else".toLlvm(self)), + ); + return instruction; + } + + pub const WipSwitch = struct { + index: u32, + instruction: Instruction.Index, + + pub fn addCase( + self: *WipSwitch, + val: Constant, + dest: Block.Index, + wip: *WipFunction, + ) Allocator.Error!void { + const instruction = wip.instructions.get(@intFromEnum(self.instruction)); + const extra = wip.extraDataTrail(Instruction.Switch, instruction.data); + const case_vals: []Constant = + @ptrCast(wip.extra.items[extra.end..][0..extra.data.cases_len]); + const case_dests: []Block.Index = + @ptrCast(wip.extra.items[extra.end + extra.data.cases_len ..][0..extra.data.cases_len]); + assert(val.typeOf(wip.builder) == extra.data.val.typeOfWip(wip)); + case_vals[self.index] = val; + case_dests[self.index] = dest; + self.index += 1; + dest.ptr(wip).branches += 1; + if (wip.builder.useLibLlvm()) + self.instruction.toLlvm(wip).addCase(val.toLlvm(wip.builder), dest.toLlvm(wip)); + } + + pub fn finish(self: WipSwitch, wip: *WipFunction) void { + const instruction = wip.instructions.get(@intFromEnum(self.instruction)); + const extra = wip.extraData(Instruction.Switch, instruction.data); + assert(self.index == extra.cases_len); + } + }; + + pub fn @"switch"( + self: *WipFunction, + val: Value, + default: Block.Index, + cases_len: u32, + ) Allocator.Error!WipSwitch { + try self.ensureUnusedExtraCapacity(1, Instruction.Switch, cases_len * 2); + const instruction = try self.addInst(null, .{ + .tag = .@"switch", + .data = self.addExtraAssumeCapacity(Instruction.Switch{ + .val = val, + .default = default, + .cases_len = cases_len, + }), + }); + _ = self.extra.addManyAsSliceAssumeCapacity(cases_len * 2); + default.ptr(self).branches += 1; + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildSwitch(val.toLlvm(self), default.toLlvm(self), @intCast(cases_len)), + ); + return .{ .index = 0, .instruction = instruction }; + } + + pub fn @"unreachable"(self: *WipFunction) Allocator.Error!Instruction.Index { + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(null, .{ .tag = .@"unreachable", .data = undefined }); + if (self.builder.useLibLlvm()) 
self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildUnreachable(), + ); + return instruction; + } + + pub fn un( + self: *WipFunction, + tag: Instruction.Tag, + val: Value, + name: []const u8, + ) Allocator.Error!Value { + switch (tag) { + .fneg, + .@"fneg fast", + => assert(val.typeOfWip(self).scalarType(self.builder).isFloatingPoint()), + else => unreachable, + } + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(name, .{ .tag = tag, .data = @intFromEnum(val) }); + if (self.builder.useLibLlvm()) { + switch (tag) { + .fneg => self.llvm.builder.setFastMath(false), + .@"fneg fast" => self.llvm.builder.setFastMath(true), + else => unreachable, + } + self.llvm.instructions.appendAssumeCapacity(switch (tag) { + .fneg, .@"fneg fast" => &llvm.Builder.buildFNeg, + else => unreachable, + }(self.llvm.builder, val.toLlvm(self), instruction.llvmName(self))); + } + return instruction.toValue(); + } + + pub fn not(self: *WipFunction, val: Value, name: []const u8) Allocator.Error!Value { + const ty = val.typeOfWip(self); + const all_ones = try self.builder.splatValue( + ty, + try self.builder.intConst(ty.scalarType(self.builder), -1), + ); + return self.bin(.xor, val, all_ones, name); + } + + pub fn neg(self: *WipFunction, val: Value, name: []const u8) Allocator.Error!Value { + return self.bin(.sub, try self.builder.zeroInitValue(val.typeOfWip(self)), val, name); + } + + pub fn bin( + self: *WipFunction, + tag: Instruction.Tag, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + switch (tag) { + .add, + .@"add nsw", + .@"add nuw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"llvm.maxnum.", + .@"llvm.minnum.", + .@"llvm.sadd.sat.", + .@"llvm.smax.", + .@"llvm.smin.", + .@"llvm.smul.fix.sat.", + .@"llvm.sshl.sat.", + .@"llvm.ssub.sat.", + .@"llvm.uadd.sat.", + .@"llvm.umax.", + .@"llvm.umin.", + .@"llvm.umul.fix.sat.", + .@"llvm.ushl.sat.", + .@"llvm.usub.sat.", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"or", + .sdiv, + .@"sdiv exact", + .shl, + .@"shl nsw", + .@"shl nuw", + .srem, + .sub, + .@"sub nsw", + .@"sub nuw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)), + else => unreachable, + } + try self.ensureUnusedExtraCapacity(1, Instruction.Binary, 0); + const instruction = try self.addInst(name, .{ + .tag = tag, + .data = self.addExtraAssumeCapacity(Instruction.Binary{ .lhs = lhs, .rhs = rhs }), + }); + if (self.builder.useLibLlvm()) { + switch (tag) { + .fadd, + .fdiv, + .fmul, + .frem, + .fsub, + => self.llvm.builder.setFastMath(false), + .@"fadd fast", + .@"fdiv fast", + .@"fmul fast", + .@"frem fast", + .@"fsub fast", + => self.llvm.builder.setFastMath(true), + else => {}, + } + self.llvm.instructions.appendAssumeCapacity(switch (tag) { + .add => &llvm.Builder.buildAdd, + .@"add nsw" => &llvm.Builder.buildNSWAdd, + .@"add nuw" => &llvm.Builder.buildNUWAdd, + .@"and" => &llvm.Builder.buildAnd, + .ashr => &llvm.Builder.buildAShr, + .@"ashr exact" => &llvm.Builder.buildAShrExact, + .fadd, .@"fadd fast" => &llvm.Builder.buildFAdd, + .fdiv, .@"fdiv fast" => &llvm.Builder.buildFDiv, + .fmul, .@"fmul fast" => &llvm.Builder.buildFMul, + .frem, .@"frem fast" => &llvm.Builder.buildFRem, + .fsub, .@"fsub fast" => &llvm.Builder.buildFSub, + .@"llvm.maxnum." => &llvm.Builder.buildMaxNum, + .@"llvm.minnum." 
=> &llvm.Builder.buildMinNum, + .@"llvm.sadd.sat." => &llvm.Builder.buildSAddSat, + .@"llvm.smax." => &llvm.Builder.buildSMax, + .@"llvm.smin." => &llvm.Builder.buildSMin, + .@"llvm.smul.fix.sat." => &llvm.Builder.buildSMulFixSat, + .@"llvm.sshl.sat." => &llvm.Builder.buildSShlSat, + .@"llvm.ssub.sat." => &llvm.Builder.buildSSubSat, + .@"llvm.uadd.sat." => &llvm.Builder.buildUAddSat, + .@"llvm.umax." => &llvm.Builder.buildUMax, + .@"llvm.umin." => &llvm.Builder.buildUMin, + .@"llvm.umul.fix.sat." => &llvm.Builder.buildUMulFixSat, + .@"llvm.ushl.sat." => &llvm.Builder.buildUShlSat, + .@"llvm.usub.sat." => &llvm.Builder.buildUSubSat, + .lshr => &llvm.Builder.buildLShr, + .@"lshr exact" => &llvm.Builder.buildLShrExact, + .mul => &llvm.Builder.buildMul, + .@"mul nsw" => &llvm.Builder.buildNSWMul, + .@"mul nuw" => &llvm.Builder.buildNUWMul, + .@"or" => &llvm.Builder.buildOr, + .sdiv => &llvm.Builder.buildSDiv, + .@"sdiv exact" => &llvm.Builder.buildExactSDiv, + .shl => &llvm.Builder.buildShl, + .@"shl nsw" => &llvm.Builder.buildNSWShl, + .@"shl nuw" => &llvm.Builder.buildNUWShl, + .srem => &llvm.Builder.buildSRem, + .sub => &llvm.Builder.buildSub, + .@"sub nsw" => &llvm.Builder.buildNSWSub, + .@"sub nuw" => &llvm.Builder.buildNUWSub, + .udiv => &llvm.Builder.buildUDiv, + .@"udiv exact" => &llvm.Builder.buildExactUDiv, + .urem => &llvm.Builder.buildURem, + .xor => &llvm.Builder.buildXor, + else => unreachable, + }(self.llvm.builder, lhs.toLlvm(self), rhs.toLlvm(self), instruction.llvmName(self))); + } + return instruction.toValue(); + } + + pub fn extractElement( + self: *WipFunction, + val: Value, + index: Value, + name: []const u8, + ) Allocator.Error!Value { + assert(val.typeOfWip(self).isVector(self.builder)); + assert(index.typeOfWip(self).isInteger(self.builder)); + try self.ensureUnusedExtraCapacity(1, Instruction.ExtractElement, 0); + const instruction = try self.addInst(name, .{ + .tag = .extractelement, + .data = self.addExtraAssumeCapacity(Instruction.ExtractElement{ + .val = val, + .index = index, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildExtractElement( + val.toLlvm(self), + index.toLlvm(self), + instruction.llvmName(self), + ), + ); + return instruction.toValue(); + } + + pub fn insertElement( + self: *WipFunction, + val: Value, + elem: Value, + index: Value, + name: []const u8, + ) Allocator.Error!Value { + assert(val.typeOfWip(self).scalarType(self.builder) == elem.typeOfWip(self)); + assert(index.typeOfWip(self).isInteger(self.builder)); + try self.ensureUnusedExtraCapacity(1, Instruction.InsertElement, 0); + const instruction = try self.addInst(name, .{ + .tag = .insertelement, + .data = self.addExtraAssumeCapacity(Instruction.InsertElement{ + .val = val, + .elem = elem, + .index = index, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildInsertElement( + val.toLlvm(self), + elem.toLlvm(self), + index.toLlvm(self), + instruction.llvmName(self), + ), + ); + return instruction.toValue(); + } + + pub fn shuffleVector( + self: *WipFunction, + lhs: Value, + rhs: Value, + mask: Value, + name: []const u8, + ) Allocator.Error!Value { + assert(lhs.typeOfWip(self).isVector(self.builder)); + assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)); + assert(mask.typeOfWip(self).scalarType(self.builder).isInteger(self.builder)); + _ = try self.ensureUnusedExtraCapacity(1, Instruction.ShuffleVector, 0); + const instruction = try self.addInst(name, .{ + .tag = 
.shufflevector, + .data = self.addExtraAssumeCapacity(Instruction.ShuffleVector{ + .lhs = lhs, + .rhs = rhs, + .mask = mask, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildShuffleVector( + lhs.toLlvm(self), + rhs.toLlvm(self), + mask.toLlvm(self), + instruction.llvmName(self), + ), + ); + return instruction.toValue(); + } + + pub fn splatVector( + self: *WipFunction, + ty: Type, + elem: Value, + name: []const u8, + ) Allocator.Error!Value { + const scalar_ty = try ty.changeLength(1, self.builder); + const mask_ty = try ty.changeScalar(.i32, self.builder); + const zero = try self.builder.intConst(.i32, 0); + const poison = try self.builder.poisonValue(scalar_ty); + const mask = try self.builder.splatValue(mask_ty, zero); + const scalar = try self.insertElement(poison, elem, zero.toValue(), name); + return self.shuffleVector(scalar, poison, mask, name); + } + + pub fn extractValue( + self: *WipFunction, + val: Value, + indices: []const u32, + name: []const u8, + ) Allocator.Error!Value { + assert(indices.len > 0); + _ = val.typeOfWip(self).childTypeAt(indices, self.builder); + try self.ensureUnusedExtraCapacity(1, Instruction.ExtractValue, indices.len); + const instruction = try self.addInst(name, .{ + .tag = .extractvalue, + .data = self.addExtraAssumeCapacity(Instruction.ExtractValue{ + .val = val, + .indices_len = @intCast(indices.len), + }), + }); + self.extra.appendSliceAssumeCapacity(indices); + if (self.builder.useLibLlvm()) { + const llvm_name = instruction.llvmName(self); + var cur = val.toLlvm(self); + for (indices) |index| + cur = self.llvm.builder.buildExtractValue(cur, @intCast(index), llvm_name); + self.llvm.instructions.appendAssumeCapacity(cur); + } + return instruction.toValue(); + } + + pub fn insertValue( + self: *WipFunction, + val: Value, + elem: Value, + indices: []const u32, + name: []const u8, + ) Allocator.Error!Value { + assert(indices.len > 0); + assert(val.typeOfWip(self).childTypeAt(indices, self.builder) == elem.typeOfWip(self)); + try self.ensureUnusedExtraCapacity(1, Instruction.InsertValue, indices.len); + const instruction = try self.addInst(name, .{ + .tag = .insertvalue, + .data = self.addExtraAssumeCapacity(Instruction.InsertValue{ + .val = val, + .elem = elem, + .indices_len = @intCast(indices.len), + }), + }); + self.extra.appendSliceAssumeCapacity(indices); + if (self.builder.useLibLlvm()) { + const ExpectedContents = [expected_gep_indices_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa); + const allocator = stack.get(); + + const llvm_name = instruction.llvmName(self); + const llvm_vals = try allocator.alloc(*llvm.Value, indices.len); + defer allocator.free(llvm_vals); + llvm_vals[0] = val.toLlvm(self); + for (llvm_vals[1..], llvm_vals[0 .. llvm_vals.len - 1], indices[0 .. 
indices.len - 1]) | + *cur_val, + prev_val, + index, + | cur_val.* = self.llvm.builder.buildExtractValue(prev_val, @intCast(index), llvm_name); + + var depth: usize = llvm_vals.len; + var cur = elem.toLlvm(self); + while (depth > 0) { + depth -= 1; + cur = self.llvm.builder.buildInsertValue( + llvm_vals[depth], + cur, + @intCast(indices[depth]), + llvm_name, + ); + } + self.llvm.instructions.appendAssumeCapacity(cur); + } + return instruction.toValue(); + } + + pub fn buildAggregate( + self: *WipFunction, + ty: Type, + elems: []const Value, + name: []const u8, + ) Allocator.Error!Value { + assert(ty.aggregateLen(self.builder) == elems.len); + var cur = try self.builder.poisonValue(ty); + for (elems, 0..) |elem, index| + cur = try self.insertValue(cur, elem, &[_]u32{@intCast(index)}, name); + return cur; + } + + pub fn alloca( + self: *WipFunction, + kind: Instruction.Alloca.Kind, + ty: Type, + len: Value, + alignment: Alignment, + addr_space: AddrSpace, + name: []const u8, + ) Allocator.Error!Value { + assert(len == .none or len.typeOfWip(self).isInteger(self.builder)); + _ = try self.builder.ptrType(addr_space); + try self.ensureUnusedExtraCapacity(1, Instruction.Alloca, 0); + const instruction = try self.addInst(name, .{ + .tag = switch (kind) { + .normal => .alloca, + .inalloca => .@"alloca inalloca", + }, + .data = self.addExtraAssumeCapacity(Instruction.Alloca{ + .type = ty, + .len = len, + .info = .{ .alignment = alignment, .addr_space = addr_space }, + }), + }); + if (self.builder.useLibLlvm()) { + const llvm_instruction = self.llvm.builder.buildAllocaInAddressSpace( + ty.toLlvm(self.builder), + @intFromEnum(addr_space), + instruction.llvmName(self), + ); + if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a)); + self.llvm.instructions.appendAssumeCapacity(llvm_instruction); + } + return instruction.toValue(); + } + + pub fn load( + self: *WipFunction, + kind: MemoryAccessKind, + ty: Type, + ptr: Value, + alignment: Alignment, + name: []const u8, + ) Allocator.Error!Value { + return self.loadAtomic(kind, ty, ptr, .system, .none, alignment, name); + } + + pub fn loadAtomic( + self: *WipFunction, + kind: MemoryAccessKind, + ty: Type, + ptr: Value, + scope: SyncScope, + ordering: AtomicOrdering, + alignment: Alignment, + name: []const u8, + ) Allocator.Error!Value { + assert(ptr.typeOfWip(self).isPointer(self.builder)); + const final_scope = switch (ordering) { + .none => .system, + else => scope, + }; + try self.ensureUnusedExtraCapacity(1, Instruction.Load, 0); + const instruction = try self.addInst(name, .{ + .tag = switch (ordering) { + .none => switch (kind) { + .normal => .load, + .@"volatile" => .@"load volatile", + }, + else => switch (kind) { + .normal => .@"load atomic", + .@"volatile" => .@"load atomic volatile", + }, + }, + .data = self.addExtraAssumeCapacity(Instruction.Load{ + .type = ty, + .ptr = ptr, + .info = .{ .scope = final_scope, .ordering = ordering, .alignment = alignment }, + }), + }); + if (self.builder.useLibLlvm()) { + const llvm_instruction = self.llvm.builder.buildLoad( + ty.toLlvm(self.builder), + ptr.toLlvm(self), + instruction.llvmName(self), + ); + if (final_scope == .singlethread) llvm_instruction.setAtomicSingleThread(.True); + if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); + if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a)); + self.llvm.instructions.appendAssumeCapacity(llvm_instruction); + } + return instruction.toValue(); + } + + pub fn store( + self: 
*WipFunction, + kind: MemoryAccessKind, + val: Value, + ptr: Value, + alignment: Alignment, + ) Allocator.Error!Instruction.Index { + return self.storeAtomic(kind, val, ptr, .system, .none, alignment); + } + + pub fn storeAtomic( + self: *WipFunction, + kind: MemoryAccessKind, + val: Value, + ptr: Value, + scope: SyncScope, + ordering: AtomicOrdering, + alignment: Alignment, + ) Allocator.Error!Instruction.Index { + assert(ptr.typeOfWip(self).isPointer(self.builder)); + const final_scope = switch (ordering) { + .none => .system, + else => scope, + }; + try self.ensureUnusedExtraCapacity(1, Instruction.Store, 0); + const instruction = try self.addInst(null, .{ + .tag = switch (ordering) { + .none => switch (kind) { + .normal => .store, + .@"volatile" => .@"store volatile", + }, + else => switch (kind) { + .normal => .@"store atomic", + .@"volatile" => .@"store atomic volatile", + }, + }, + .data = self.addExtraAssumeCapacity(Instruction.Store{ + .val = val, + .ptr = ptr, + .info = .{ .scope = final_scope, .ordering = ordering, .alignment = alignment }, + }), + }); + if (self.builder.useLibLlvm()) { + const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self)); + switch (kind) { + .normal => {}, + .@"volatile" => llvm_instruction.setVolatile(.True), + } + if (final_scope == .singlethread) llvm_instruction.setAtomicSingleThread(.True); + if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); + if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a)); + self.llvm.instructions.appendAssumeCapacity(llvm_instruction); + } + return instruction; + } + + pub fn fence( + self: *WipFunction, + scope: SyncScope, + ordering: AtomicOrdering, + ) Allocator.Error!Instruction.Index { + assert(ordering != .none); + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(null, .{ + .tag = .fence, + .data = @bitCast(MemoryAccessInfo{ + .scope = scope, + .ordering = ordering, + .alignment = undefined, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildFence( + @enumFromInt(@intFromEnum(ordering)), + llvm.Bool.fromBool(scope == .singlethread), + "", + ), + ); + return instruction; + } + + pub fn gep( + self: *WipFunction, + kind: Instruction.GetElementPtr.Kind, + ty: Type, + base: Value, + indices: []const Value, + name: []const u8, + ) Allocator.Error!Value { + const base_ty = base.typeOfWip(self); + const base_is_vector = base_ty.isVector(self.builder); + + const VectorInfo = struct { + kind: Type.Vector.Kind, + len: u32, + + fn init(vector_ty: Type, builder: *const Builder) @This() { + return .{ .kind = vector_ty.vectorKind(builder), .len = vector_ty.vectorLen(builder) }; + } + }; + var vector_info: ?VectorInfo = + if (base_is_vector) VectorInfo.init(base_ty, self.builder) else null; + for (indices) |index| { + const index_ty = index.typeOfWip(self); + switch (index_ty.tag(self.builder)) { + .integer => {}, + .vector, .scalable_vector => { + const index_info = VectorInfo.init(index_ty, self.builder); + if (vector_info) |info| + assert(std.meta.eql(info, index_info)) + else + vector_info = index_info; + }, + else => unreachable, + } + } + if (!base_is_vector) if (vector_info) |info| switch (info.kind) { + inline else => |vector_kind| _ = try self.builder.vectorType( + vector_kind, + info.len, + base_ty, + ), + }; + + try self.ensureUnusedExtraCapacity(1, Instruction.GetElementPtr, indices.len); + const instruction = try 
self.addInst(name, .{ + .tag = switch (kind) { + .normal => .getelementptr, + .inbounds => .@"getelementptr inbounds", + }, + .data = self.addExtraAssumeCapacity(Instruction.GetElementPtr{ + .type = ty, + .base = base, + .indices_len = @intCast(indices.len), + }), + }); + self.extra.appendSliceAssumeCapacity(@ptrCast(indices)); + if (self.builder.useLibLlvm()) { + const ExpectedContents = [expected_gep_indices_len]*llvm.Value; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa); + const allocator = stack.get(); + + const llvm_indices = try allocator.alloc(*llvm.Value, indices.len); + defer allocator.free(llvm_indices); + for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self); + + self.llvm.instructions.appendAssumeCapacity(switch (kind) { + .normal => &llvm.Builder.buildGEP, + .inbounds => &llvm.Builder.buildInBoundsGEP, + }( + self.llvm.builder, + ty.toLlvm(self.builder), + base.toLlvm(self), + llvm_indices.ptr, + @intCast(llvm_indices.len), + instruction.llvmName(self), + )); + } + return instruction.toValue(); + } + + pub fn gepStruct( + self: *WipFunction, + ty: Type, + base: Value, + index: usize, + name: []const u8, + ) Allocator.Error!Value { + assert(ty.isStruct(self.builder)); + return self.gep(.inbounds, ty, base, &.{ + try self.builder.intValue(.i32, 0), try self.builder.intValue(.i32, index), + }, name); + } + + pub fn conv( + self: *WipFunction, + signedness: Instruction.Cast.Signedness, + val: Value, + ty: Type, + name: []const u8, + ) Allocator.Error!Value { + const val_ty = val.typeOfWip(self); + if (val_ty == ty) return val; + return self.cast(self.builder.convTag(Instruction.Tag, signedness, val_ty, ty), val, ty, name); + } + + pub fn cast( + self: *WipFunction, + tag: Instruction.Tag, + val: Value, + ty: Type, + name: []const u8, + ) Allocator.Error!Value { + switch (tag) { + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => {}, + else => unreachable, + } + if (val.typeOfWip(self) == ty) return val; + try self.ensureUnusedExtraCapacity(1, Instruction.Cast, 0); + const instruction = try self.addInst(name, .{ + .tag = tag, + .data = self.addExtraAssumeCapacity(Instruction.Cast{ + .val = val, + .type = ty, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(switch (tag) { + .addrspacecast => &llvm.Builder.buildAddrSpaceCast, + .bitcast => &llvm.Builder.buildBitCast, + .fpext => &llvm.Builder.buildFPExt, + .fptosi => &llvm.Builder.buildFPToSI, + .fptoui => &llvm.Builder.buildFPToUI, + .fptrunc => &llvm.Builder.buildFPTrunc, + .inttoptr => &llvm.Builder.buildIntToPtr, + .ptrtoint => &llvm.Builder.buildPtrToInt, + .sext => &llvm.Builder.buildSExt, + .sitofp => &llvm.Builder.buildSIToFP, + .trunc => &llvm.Builder.buildTrunc, + .uitofp => &llvm.Builder.buildUIToFP, + .zext => &llvm.Builder.buildZExt, + else => unreachable, + }(self.llvm.builder, val.toLlvm(self), ty.toLlvm(self.builder), instruction.llvmName(self))); + return instruction.toValue(); + } + + pub fn icmp( + self: *WipFunction, + cond: IntegerCondition, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + return self.cmpTag(switch (cond) { + inline else => |tag| @field(Instruction.Tag, "icmp " ++ @tagName(tag)), + }, @intFromEnum(cond), lhs, rhs, name); + } + + pub fn fcmp( + self: *WipFunction, + cond: FloatCondition, + lhs: Value, + rhs: Value, + name: []const 
u8, + ) Allocator.Error!Value { + return self.cmpTag(switch (cond) { + inline else => |tag| @field(Instruction.Tag, "fcmp " ++ @tagName(tag)), + }, @intFromEnum(cond), lhs, rhs, name); + } + + pub fn fcmpFast( + self: *WipFunction, + cond: FloatCondition, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + return self.cmpTag(switch (cond) { + inline else => |tag| @field(Instruction.Tag, "fcmp fast " ++ @tagName(tag)), + }, @intFromEnum(cond), lhs, rhs, name); + } + + pub const WipPhi = struct { + block: Block.Index, + instruction: Instruction.Index, + + pub fn toValue(self: WipPhi) Value { + return self.instruction.toValue(); + } + + pub fn finish( + self: WipPhi, + vals: []const Value, + blocks: []const Block.Index, + wip: *WipFunction, + ) if (build_options.have_llvm) Allocator.Error!void else void { + const incoming_len = self.block.ptrConst(wip).incoming; + assert(vals.len == incoming_len and blocks.len == incoming_len); + const instruction = wip.instructions.get(@intFromEnum(self.instruction)); + const extra = wip.extraDataTrail(Instruction.WipPhi, instruction.data); + for (vals) |val| assert(val.typeOfWip(wip) == extra.data.type); + const incoming_vals: []Value = @ptrCast(wip.extra.items[extra.end..][0..incoming_len]); + const incoming_blocks: []Block.Index = + @ptrCast(wip.extra.items[extra.end + incoming_len ..][0..incoming_len]); + @memcpy(incoming_vals, vals); + @memcpy(incoming_blocks, blocks); + if (wip.builder.useLibLlvm()) { + const ExpectedContents = extern struct { + [expected_incoming_len]*llvm.Value, + [expected_incoming_len]*llvm.BasicBlock, + }; + var stack align(@alignOf(ExpectedContents)) = + std.heap.stackFallback(@sizeOf(ExpectedContents), wip.builder.gpa); + const allocator = stack.get(); + + const llvm_vals = try allocator.alloc(*llvm.Value, incoming_len); + defer allocator.free(llvm_vals); + const llvm_blocks = try allocator.alloc(*llvm.BasicBlock, incoming_len); + defer allocator.free(llvm_blocks); + + for (llvm_vals, vals) |*llvm_val, incoming_val| llvm_val.* = incoming_val.toLlvm(wip); + for (llvm_blocks, blocks) |*llvm_block, incoming_block| + llvm_block.* = incoming_block.toLlvm(wip); + self.instruction.toLlvm(wip) + .addIncoming(llvm_vals.ptr, llvm_blocks.ptr, @intCast(incoming_len)); + } + } + }; + + pub fn phi(self: *WipFunction, ty: Type, name: []const u8) Allocator.Error!WipPhi { + return self.phiTag(.phi, ty, name); + } + + pub fn phiFast(self: *WipFunction, ty: Type, name: []const u8) Allocator.Error!WipPhi { + return self.phiTag(.@"phi fast", ty, name); + } + + pub fn select( + self: *WipFunction, + cond: Value, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + return self.selectTag(.select, cond, lhs, rhs, name); + } + + pub fn selectFast( + self: *WipFunction, + cond: Value, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + return self.selectTag(.@"select fast", cond, lhs, rhs, name); + } + + pub fn vaArg(self: *WipFunction, list: Value, ty: Type, name: []const u8) Allocator.Error!Value { + try self.ensureUnusedExtraCapacity(1, Instruction.VaArg, 0); + const instruction = try self.addInst(name, .{ + .tag = .va_arg, + .data = self.addExtraAssumeCapacity(Instruction.VaArg{ + .list = list, + .type = ty, + }), + }); + if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildVAArg( + list.toLlvm(self), + ty.toLlvm(self.builder), + instruction.llvmName(self), ), ); - return index; + return instruction.toValue(); } - pub fn 
retVoid(self: *WipFunction) Allocator.Error!void { - _ = try self.addInst(.{ .tag = .@"ret void", .data = undefined }, .none); - if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( - self.llvm.builder.buildRetVoid(), - ); + pub const WipUnimplemented = struct { + instruction: Instruction.Index, + + pub fn finish(self: WipUnimplemented, val: *llvm.Value, wip: *WipFunction) Value { + assert(wip.builder.useLibLlvm()); + wip.llvm.instructions.items[@intFromEnum(self.instruction)] = val; + return self.instruction.toValue(); + } + }; + + pub fn unimplemented( + self: *WipFunction, + ty: Type, + name: []const u8, + ) Allocator.Error!WipUnimplemented { + try self.ensureUnusedExtraCapacity(1, NoExtra, 0); + const instruction = try self.addInst(name, .{ + .tag = .unimplemented, + .data = @intFromEnum(ty), + }); + if (self.builder.useLibLlvm()) _ = self.llvm.instructions.addOneAssumeCapacity(); + return .{ .instruction = instruction }; } pub fn finish(self: *WipFunction) Allocator.Error!void { const gpa = self.builder.gpa; const function = self.function.ptr(self.builder); + const params_len = self.function.typeOf(self.builder).functionParameters(self.builder).len; const final_instructions_len = self.blocks.items.len + self.instructions.len; const blocks = try gpa.alloc(Function.Block, self.blocks.items.len); errdefer gpa.free(blocks); - const instructions = try gpa.alloc(Instruction.Index, self.instructions.len); - defer gpa.free(instructions); + const instructions: struct { + items: []Instruction.Index, + + fn map(instructions: @This(), val: Value) Value { + if (val == .none) return .none; + return switch (val.unwrap()) { + .instruction => |instruction| instructions.items[ + @intFromEnum(instruction) + ].toValue(), + .constant => |constant| constant.toValue(), + }; + } + } = .{ .items = try gpa.alloc(Instruction.Index, self.instructions.len) }; + defer gpa.free(instructions.items); - const names = if (self.builder.strip) null else try gpa.alloc(String, final_instructions_len); - errdefer if (names) |new_names| gpa.free(new_names); + const names = try gpa.alloc(String, final_instructions_len); + errdefer gpa.free(names); const metadata = if (self.builder.strip) null else try gpa.alloc(Metadata, final_instructions_len); errdefer if (metadata) |new_metadata| gpa.free(new_metadata); + var wip_extra: struct { + index: Instruction.ExtraIndex = 0, + items: []u32, + + fn addExtra(wip_extra: *@This(), extra: anytype) Instruction.ExtraIndex { + const result = wip_extra.index; + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + const value = @field(extra, field.name); + wip_extra.items[wip_extra.index] = switch (field.type) { + u32 => value, + Alignment, AtomicOrdering, Block.Index, Type, Value => @intFromEnum(value), + MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + wip_extra.index += 1; + } + return result; + } + + fn appendSlice(wip_extra: *@This(), slice: anytype) void { + if (@typeInfo(@TypeOf(slice)).Pointer.child == Value) @compileError("use appendValues"); + const data: []const u32 = @ptrCast(slice); + @memcpy(wip_extra.items[wip_extra.index..][0..data.len], data); + wip_extra.index += @intCast(data.len); + } + + fn appendValues(wip_extra: *@This(), vals: []const Value, ctx: anytype) void { + for (wip_extra.items[wip_extra.index..][0..vals.len], vals) |*extra, val| + extra.* = @intFromEnum(ctx.map(val)); + wip_extra.index += @intCast(vals.len); + } + + fn finish(wip_extra: 
*const @This()) []const u32 { + assert(wip_extra.index == wip_extra.items.len); + return wip_extra.items; + } + } = .{ .items = try gpa.alloc(u32, self.extra.items.len) }; + errdefer gpa.free(wip_extra.items); + gpa.free(function.blocks); function.blocks = &.{}; - if (function.names) |old_names| gpa.free(old_names[0..function.instructions.len]); - function.names = null; + gpa.free(function.names[0..function.instructions.len]); if (function.metadata) |old_metadata| gpa.free(old_metadata[0..function.instructions.len]); function.metadata = null; + gpa.free(function.extra); + function.extra = &.{}; function.instructions.shrinkRetainingCapacity(0); try function.instructions.setCapacity(gpa, final_instructions_len); errdefer function.instructions.shrinkRetainingCapacity(0); { - var final_instruction: Instruction.Index = @enumFromInt(0); + var final_instruction_index: Instruction.Index = @enumFromInt(0); + for (0..params_len) |param_index| { + instructions.items[param_index] = final_instruction_index; + final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1); + } for (blocks, self.blocks.items) |*final_block, current_block| { - final_block.instruction = final_instruction; - final_instruction = @enumFromInt(@intFromEnum(final_instruction) + 1); + assert(current_block.incoming == current_block.branches); + final_block.instruction = final_instruction_index; + final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1); for (current_block.instructions.items) |instruction| { - instructions[@intFromEnum(instruction)] = final_instruction; - final_instruction = @enumFromInt(@intFromEnum(final_instruction) + 1); + instructions.items[@intFromEnum(instruction)] = final_instruction_index; + final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1); } } } - var next_name: String = @enumFromInt(0); + var wip_name: struct { + next_name: String = @enumFromInt(0), + + fn map(wip_name: *@This(), old_name: String) String { + if (old_name != .empty) return old_name; + + const new_name = wip_name.next_name; + wip_name.next_name = @enumFromInt(@intFromEnum(new_name) + 1); + return new_name; + } + } = .{}; + for (0..params_len) |param_index| { + const old_argument_index: Instruction.Index = @enumFromInt(param_index); + const new_argument_index: Instruction.Index = @enumFromInt(function.instructions.len); + const argument = self.instructions.get(@intFromEnum(old_argument_index)); + assert(argument.tag == .arg); + assert(argument.data == param_index); + function.instructions.appendAssumeCapacity(argument); + names[@intFromEnum(new_argument_index)] = wip_name.map( + if (self.builder.strip) .empty else self.names.items[@intFromEnum(old_argument_index)], + ); + } for (self.blocks.items) |current_block| { - const block_instruction: Instruction.Index = @enumFromInt(function.instructions.len); + const new_block_index: Instruction.Index = @enumFromInt(function.instructions.len); function.instructions.appendAssumeCapacity(.{ .tag = .block, .data = current_block.incoming, }); - if (names) |new_names| - new_names[@intFromEnum(block_instruction)] = switch (current_block.name) { - .empty => name: { - const name = next_name; - next_name = @enumFromInt(@intFromEnum(name) + 1); - break :name name; - }, - else => |name| name, - }; - for (current_block.instructions.items) |instruction_index| { - var instruction = self.instructions.get(@intFromEnum(instruction_index)); + names[@intFromEnum(new_block_index)] = wip_name.map(current_block.name); + for 
(current_block.instructions.items) |old_instruction_index| { + const new_instruction_index: Instruction.Index = + @enumFromInt(function.instructions.len); + var instruction = self.instructions.get(@intFromEnum(old_instruction_index)); switch (instruction.tag) { - .block => unreachable, - .@"ret void" => {}, - else => unreachable, + .add, + .@"add nsw", + .@"add nuw", + .@"add nuw nsw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + .@"llvm.maxnum.", + .@"llvm.minnum.", + .@"llvm.sadd.sat.", + .@"llvm.smax.", + .@"llvm.smin.", + .@"llvm.smul.fix.sat.", + .@"llvm.sshl.sat.", + .@"llvm.ssub.sat.", + .@"llvm.uadd.sat.", + .@"llvm.umax.", + .@"llvm.umin.", + .@"llvm.umul.fix.sat.", + .@"llvm.ushl.sat.", + .@"llvm.usub.sat.", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"mul nuw nsw", + .@"or", + .sdiv, + .@"sdiv exact", + .shl, + .@"shl nsw", + .@"shl nuw", + .@"shl nuw nsw", + .srem, + .sub, + .@"sub nsw", + .@"sub nuw", + .@"sub nuw nsw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => { + const extra = self.extraData(Instruction.Binary, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Binary{ + .lhs = instructions.map(extra.lhs), + .rhs = instructions.map(extra.rhs), + }); + }, + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => { + const extra = self.extraData(Instruction.Cast, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Cast{ + .val = instructions.map(extra.val), + .type = extra.type, + }); + }, + .alloca, + .@"alloca inalloca", + => { + const extra = self.extraData(Instruction.Alloca, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Alloca{ + .type = extra.type, + .len = instructions.map(extra.len), + .info = extra.info, + }); + }, + .arg, + .block, + => unreachable, + .br, + .fence, + .@"ret void", + .unimplemented, + .@"unreachable", + => {}, + .extractelement => { + const extra = self.extraData(Instruction.ExtractElement, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.ExtractElement{ + .val = instructions.map(extra.val), + .index = instructions.map(extra.index), + }); + }, + .br_cond => { + const extra = self.extraData(Instruction.BrCond, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.BrCond{ + .cond = instructions.map(extra.cond), + .then = extra.then, + .@"else" = extra.@"else", + }); + }, + .extractvalue => { + const extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data); + const indices: []const u32 = + self.extra.items[extra.end..][0..extra.data.indices_len]; 
+ instruction.data = wip_extra.addExtra(Instruction.ExtractValue{ + .val = instructions.map(extra.data.val), + .indices_len = extra.data.indices_len, + }); + wip_extra.appendSlice(indices); + }, + .fneg, + .@"fneg fast", + .ret, + => instruction.data = @intFromEnum(instructions.map(@enumFromInt(instruction.data))), + .getelementptr, + .@"getelementptr inbounds", + => { + const extra = self.extraDataTrail(Instruction.GetElementPtr, instruction.data); + const indices: []const Value = + @ptrCast(self.extra.items[extra.end..][0..extra.data.indices_len]); + instruction.data = wip_extra.addExtra(Instruction.GetElementPtr{ + .type = extra.data.type, + .base = instructions.map(extra.data.base), + .indices_len = extra.data.indices_len, + }); + wip_extra.appendValues(indices, instructions); + }, + .insertelement => { + const extra = self.extraData(Instruction.InsertElement, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.InsertElement{ + .val = instructions.map(extra.val), + .elem = instructions.map(extra.elem), + .index = instructions.map(extra.index), + }); + }, + .insertvalue => { + const extra = self.extraDataTrail(Instruction.InsertValue, instruction.data); + const indices: []const u32 = + self.extra.items[extra.end..][0..extra.data.indices_len]; + instruction.data = wip_extra.addExtra(Instruction.InsertValue{ + .val = instructions.map(extra.data.val), + .elem = instructions.map(extra.data.elem), + .indices_len = extra.data.indices_len, + }); + wip_extra.appendSlice(indices); + }, + .load, + .@"load atomic", + .@"load atomic volatile", + .@"load volatile", + => { + const extra = self.extraData(Instruction.Load, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Load{ + .type = extra.type, + .ptr = instructions.map(extra.ptr), + .info = extra.info, + }); + }, + .phi, + .@"phi fast", + => { + const extra = self.extraDataTrail(Instruction.WipPhi, instruction.data); + const incoming_len = current_block.incoming; + const incoming_vals: []const Value = + @ptrCast(self.extra.items[extra.end..][0..incoming_len]); + const incoming_blocks: []const Block.Index = + @ptrCast(self.extra.items[extra.end + incoming_len ..][0..incoming_len]); + instruction.data = wip_extra.addExtra(Instruction.Phi{ + .incoming_len = incoming_len, + }); + wip_extra.appendValues(incoming_vals, instructions); + wip_extra.appendSlice(incoming_blocks); + }, + .select, + .@"select fast", + => { + const extra = self.extraData(Instruction.Select, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Select{ + .cond = instructions.map(extra.cond), + .lhs = instructions.map(extra.lhs), + .rhs = instructions.map(extra.rhs), + }); + }, + .shufflevector => { + const extra = self.extraData(Instruction.ShuffleVector, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.ShuffleVector{ + .lhs = instructions.map(extra.lhs), + .rhs = instructions.map(extra.rhs), + .mask = instructions.map(extra.mask), + }); + }, + .store, + .@"store atomic", + .@"store atomic volatile", + .@"store volatile", + => { + const extra = self.extraData(Instruction.Store, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.Store{ + .val = instructions.map(extra.val), + .ptr = instructions.map(extra.ptr), + .info = extra.info, + }); + }, + .@"switch" => { + const extra = self.extraDataTrail(Instruction.Switch, instruction.data); + const case_vals: []const Constant = + @ptrCast(self.extra.items[extra.end..][0..extra.data.cases_len]); + const case_blocks: []const Block.Index = 
@ptrCast(self.extra + .items[extra.end + extra.data.cases_len ..][0..extra.data.cases_len]); + instruction.data = wip_extra.addExtra(Instruction.Switch{ + .val = instructions.map(extra.data.val), + .default = extra.data.default, + .cases_len = extra.data.cases_len, + }); + wip_extra.appendSlice(case_vals); + wip_extra.appendSlice(case_blocks); + }, + .va_arg => { + const extra = self.extraData(Instruction.VaArg, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.VaArg{ + .list = instructions.map(extra.list), + .type = extra.type, + }); + }, } function.instructions.appendAssumeCapacity(instruction); + names[@intFromEnum(new_instruction_index)] = wip_name.map(if (self.builder.strip) + if (old_instruction_index.hasResultWip(self)) .empty else .none + else + self.names.items[@intFromEnum(old_instruction_index)]); } } - function.extra = try self.extra.toOwnedSlice(gpa); + assert(function.instructions.len == final_instructions_len); + function.extra = wip_extra.finish(); function.blocks = blocks; - function.names = if (names) |new_names| new_names.ptr else null; + function.names = names.ptr; function.metadata = if (metadata) |new_metadata| new_metadata.ptr else null; } @@ -1212,36 +3656,330 @@ pub const WipFunction = struct { self.* = undefined; } + fn cmpTag( + self: *WipFunction, + tag: Instruction.Tag, + cond: u32, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + switch (tag) { + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + => assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)), + else => unreachable, + } + _ = try lhs.typeOfWip(self).changeScalar(.i1, self.builder); + try self.ensureUnusedExtraCapacity(1, Instruction.Binary, 0); + const instruction = try self.addInst(name, .{ + .tag = tag, + .data = self.addExtraAssumeCapacity(Instruction.Binary{ + .lhs = lhs, + .rhs = rhs, + }), + }); + if (self.builder.useLibLlvm()) { + switch (tag) { + .@"fcmp false", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + => self.llvm.builder.setFastMath(false), + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + => self.llvm.builder.setFastMath(true), + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + => {}, + else => unreachable, + } + 
self.llvm.instructions.appendAssumeCapacity(switch (tag) { + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + => self.llvm.builder.buildFCmp( + @enumFromInt(cond), + lhs.toLlvm(self), + rhs.toLlvm(self), + instruction.llvmName(self), + ), + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + => self.llvm.builder.buildICmp( + @enumFromInt(cond), + lhs.toLlvm(self), + rhs.toLlvm(self), + instruction.llvmName(self), + ), + else => unreachable, + }); + } + return instruction.toValue(); + } + + fn phiTag( + self: *WipFunction, + tag: Instruction.Tag, + ty: Type, + name: []const u8, + ) Allocator.Error!WipPhi { + switch (tag) { + .phi, .@"phi fast" => assert(try ty.isSized(self.builder)), + else => unreachable, + } + const incoming = self.cursor.block.ptrConst(self).incoming; + assert(incoming > 0); + try self.ensureUnusedExtraCapacity(1, Instruction.WipPhi, incoming * 2); + const instruction = try self.addInst(name, .{ + .tag = tag, + .data = self.addExtraAssumeCapacity(Instruction.WipPhi{ .type = ty }), + }); + _ = self.extra.addManyAsSliceAssumeCapacity(incoming * 2); + if (self.builder.useLibLlvm()) { + switch (tag) { + .phi => self.llvm.builder.setFastMath(false), + .@"phi fast" => self.llvm.builder.setFastMath(true), + else => unreachable, + } + self.llvm.instructions.appendAssumeCapacity( + self.llvm.builder.buildPhi(ty.toLlvm(self.builder), instruction.llvmName(self)), + ); + } + return .{ .block = self.cursor.block, .instruction = instruction }; + } + + fn selectTag( + self: *WipFunction, + tag: Instruction.Tag, + cond: Value, + lhs: Value, + rhs: Value, + name: []const u8, + ) Allocator.Error!Value { + switch (tag) { + .select, .@"select fast" => { + assert(cond.typeOfWip(self).scalarType(self.builder) == .i1); + assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)); + }, + else => unreachable, + } + try self.ensureUnusedExtraCapacity(1, Instruction.Select, 0); + const instruction = try self.addInst(name, .{ + .tag = tag, + .data = self.addExtraAssumeCapacity(Instruction.Select{ + .cond = cond, + .lhs = lhs, + .rhs = rhs, + }), + }); + if (self.builder.useLibLlvm()) { + switch (tag) { + .select => self.llvm.builder.setFastMath(false), + .@"select fast" => self.llvm.builder.setFastMath(true), + else => unreachable, + } + self.llvm.instructions.appendAssumeCapacity(self.llvm.builder.buildSelect( + cond.toLlvm(self), + lhs.toLlvm(self), + rhs.toLlvm(self), + instruction.llvmName(self), + )); + } + return instruction.toValue(); + } + + fn ensureUnusedExtraCapacity( + self: *WipFunction, + count: usize, + comptime Extra: type, + trail_len: usize, + ) Allocator.Error!void { + try self.extra.ensureUnusedCapacity( + self.builder.gpa, + count * (@typeInfo(Extra).Struct.fields.len + trail_len), + ); + } + fn addInst( self: *WipFunction, + name: ?[]const u8, instruction: Instruction, - name: String, ) Allocator.Error!Instruction.Index { - const 
block_instructions = &self.blocks.items[@intFromEnum(self.cursor.block)].instructions; + const block_instructions = &self.cursor.block.ptr(self).instructions; try self.instructions.ensureUnusedCapacity(self.builder.gpa, 1); - try self.names.ensureUnusedCapacity(self.builder.gpa, 1); + if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, 1); try block_instructions.ensureUnusedCapacity(self.builder.gpa, 1); - if (self.builder.useLibLlvm()) { + if (self.builder.useLibLlvm()) try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, 1); - - self.llvm.builder.positionBuilder( - self.cursor.block.toLlvm(self), - if (self.cursor.instruction < block_instructions.items.len) - self.llvm.instructions.items[ - @intFromEnum(block_instructions.items[self.cursor.instruction]) - ] - else - null, - ); - } + const final_name = if (name) |n| + if (self.builder.strip) .empty else try self.builder.string(n) + else + .none; + + if (self.builder.useLibLlvm()) self.llvm.builder.positionBuilder( + self.cursor.block.toLlvm(self), + for (block_instructions.items[self.cursor.instruction..]) |instruction_index| { + const llvm_instruction = + self.llvm.instructions.items[@intFromEnum(instruction_index)]; + // TODO: remove when constant propagation is implemented + if (!llvm_instruction.isConstant().toBool()) break llvm_instruction; + } else null, + ); const index: Instruction.Index = @enumFromInt(self.instructions.len); self.instructions.appendAssumeCapacity(instruction); - self.names.appendAssumeCapacity(name); + if (!self.builder.strip) self.names.appendAssumeCapacity(final_name); block_instructions.insertAssumeCapacity(self.cursor.instruction, index); self.cursor.instruction += 1; return index; } + + fn addExtraAssumeCapacity(self: *WipFunction, extra: anytype) Instruction.ExtraIndex { + const result: Instruction.ExtraIndex = @intCast(self.extra.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + const value = @field(extra, field.name); + self.extra.appendAssumeCapacity(switch (field.type) { + u32 => value, + Alignment, AtomicOrdering, Block.Index, Type, Value => @intFromEnum(value), + MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }); + } + return result; + } + + fn extraDataTrail( + self: *const WipFunction, + comptime T: type, + index: Instruction.ExtraIndex, + ) struct { data: T, end: Instruction.ExtraIndex } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, self.extra.items[index..][0..fields.len]) |field, value| + @field(result, field.name) = switch (field.type) { + u32 => value, + Alignment, AtomicOrdering, Block.Index, Type, Value => @enumFromInt(value), + MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; + } + + fn extraData(self: *const WipFunction, comptime T: type, index: Instruction.ExtraIndex) T { + return self.extraDataTrail(T, index).data; + } }; pub const FloatCondition = enum(u4) { @@ -1274,6 +4012,73 @@ pub const IntegerCondition = enum(u6) { sle = 41, }; +pub const MemoryAccessKind = enum(u1) { + normal, + @"volatile", +}; + +pub const SyncScope = enum(u1) { + singlethread, + system, + + pub fn format( + self: SyncScope, + comptime prefix: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) 
@TypeOf(writer).Error!void { + if (self != .system) try writer.print( + \\{s} syncscope("{s}") + , .{ prefix, @tagName(self) }); + } +}; + +pub const AtomicOrdering = enum(u3) { + none = 0, + unordered = 1, + monotonic = 2, + acquire = 4, + release = 5, + acq_rel = 6, + seq_cst = 7, + + pub fn format( + self: AtomicOrdering, + comptime prefix: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self != .none) try writer.print("{s} {s}", .{ prefix, @tagName(self) }); + } +}; + +const MemoryAccessInfo = packed struct(u32) { + scope: SyncScope, + ordering: AtomicOrdering, + alignment: Alignment, + _: u22 = undefined, +}; + +pub const FastMath = packed struct(u32) { + nnan: bool = false, + ninf: bool = false, + nsz: bool = false, + arcp: bool = false, + contract: bool = false, + afn: bool = false, + reassoc: bool = false, + + pub const fast = FastMath{ + .nnan = true, + .ninf = true, + .nsz = true, + .arcp = true, + .contract = true, + .afn = true, + .reassoc = true, + }; +}; + pub const Constant = enum(u32) { false, true, @@ -1379,6 +4184,7 @@ pub const Constant = enum(u32) { pub const Aggregate = struct { type: Type, + //fields: [type.aggregateLen(builder)]Constant, }; pub const Splat = extern struct { @@ -1391,12 +4197,8 @@ pub const Constant = enum(u32) { block: Function.Block.Index, }; - pub const FunctionReference = struct { - function: Function.Index, - }; - pub const Cast = extern struct { - arg: Constant, + val: Constant, type: Type, pub const Signedness = enum { unsigned, signed, unneeded }; @@ -1405,9 +4207,12 @@ pub const Constant = enum(u32) { pub const GetElementPtr = struct { type: Type, base: Constant, - indices_len: u32, + info: Info, + //indices: [info.indices_len]Constant, pub const Kind = enum { normal, inbounds }; + pub const InRangeIndex = enum(u16) { none = std.math.maxInt(u16), _ }; + pub const Info = packed struct(u32) { indices_len: u16, inrange: InRangeIndex }; }; pub const Compare = extern struct { @@ -1417,12 +4222,12 @@ pub const Constant = enum(u32) { }; pub const ExtractElement = extern struct { - arg: Constant, + val: Constant, index: Constant, }; pub const InsertElement = extern struct { - arg: Constant, + val: Constant, elem: Constant, index: Constant, }; @@ -1448,6 +4253,10 @@ pub const Constant = enum(u32) { .{ .global = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_global)) }; } + pub fn toValue(self: Constant) Value { + return @enumFromInt(@intFromEnum(Value.first_constant) + @intFromEnum(self)); + } + pub fn typeOf(self: Constant, builder: *Builder) Type { switch (self.unwrap()) { .constant => |constant| { @@ -1491,10 +4300,8 @@ pub const Constant = enum(u32) { ), .dso_local_equivalent, .no_cfi, - => builder.ptrTypeAssumeCapacity( - builder.constantExtraData(FunctionReference, item.data) - .function.ptrConst(builder).global.ptrConst(builder).addr_space, - ), + => builder.ptrTypeAssumeCapacity(@as(Function.Index, @enumFromInt(item.data)) + .ptrConst(builder).global.ptrConst(builder).addr_space), .trunc, .zext, .sext, @@ -1514,42 +4321,29 @@ pub const Constant = enum(u32) { => { const extra = builder.constantExtraDataTrail(GetElementPtr, item.data); const indices: []const Constant = @ptrCast(builder.constant_extra - .items[extra.end..][0..extra.data.indices_len]); + .items[extra.end..][0..extra.data.info.indices_len]); const base_ty = extra.data.base.typeOf(builder); if (!base_ty.isVector(builder)) for (indices) |index| { const index_ty = index.typeOf(builder); if (!index_ty.isVector(builder)) continue; 
- switch (index_ty.vectorKind(builder)) { - inline else => |kind| return builder.vectorTypeAssumeCapacity( - kind, - index_ty.vectorLen(builder), - base_ty, - ), - } + return index_ty.changeScalarAssumeCapacity(base_ty, builder); }; return base_ty; }, - .icmp, .fcmp => { - const ty = builder.constantExtraData(Compare, item.data).lhs.typeOf(builder); - return if (ty.isVector(builder)) switch (ty.vectorKind(builder)) { - inline else => |kind| builder - .vectorTypeAssumeCapacity(kind, ty.vectorLen(builder), .i1), - } else ty; - }, + .icmp, + .fcmp, + => builder.constantExtraData(Compare, item.data).lhs.typeOf(builder) + .changeScalarAssumeCapacity(.i1, builder), .extractelement => builder.constantExtraData(ExtractElement, item.data) - .arg.typeOf(builder).childType(builder), + .val.typeOf(builder).childType(builder), .insertelement => builder.constantExtraData(InsertElement, item.data) - .arg.typeOf(builder), + .val.typeOf(builder), .shufflevector => { const extra = builder.constantExtraData(ShuffleVector, item.data); - const ty = extra.lhs.typeOf(builder); - return switch (ty.vectorKind(builder)) { - inline else => |kind| builder.vectorTypeAssumeCapacity( - kind, - extra.mask.typeOf(builder).vectorLen(builder), - ty.childType(builder), - ), - }; + return extra.lhs.typeOf(builder).changeLengthAssumeCapacity( + extra.mask.typeOf(builder).vectorLen(builder), + builder, + ); }, .add, .@"add nsw", @@ -1617,7 +4411,42 @@ pub const Constant = enum(u32) { } } - pub const FormatData = struct { + pub fn getBase(self: Constant, builder: *const Builder) Global.Index { + var cur = self; + while (true) switch (cur.unwrap()) { + .constant => |constant| { + const item = builder.constant_items.get(constant); + switch (item.tag) { + .ptrtoint, + .inttoptr, + .bitcast, + => cur = builder.constantExtraData(Cast, item.data).val, + .getelementptr => cur = builder.constantExtraData(GetElementPtr, item.data).base, + .add => { + const extra = builder.constantExtraData(Binary, item.data); + const lhs_base = extra.lhs.getBase(builder); + const rhs_base = extra.rhs.getBase(builder); + return if (lhs_base != .none and rhs_base != .none) + .none + else if (lhs_base != .none) lhs_base else rhs_base; + }, + .sub => { + const extra = builder.constantExtraData(Binary, item.data); + if (extra.rhs.getBase(builder) != .none) return .none; + cur = extra.lhs; + }, + else => return .none, + } + }, + .global => |global| switch (global.ptrConst(builder).kind) { + .alias => |alias| cur = alias.ptrConst(builder).init, + .variable, .function => return global, + .replaced => unreachable, + }, + }; + } + + const FormatData = struct { constant: Constant, builder: *Builder, }; @@ -1627,12 +4456,18 @@ pub const Constant = enum(u32) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (comptime std.mem.eql(u8, fmt_str, "%")) { - try writer.print("{%} ", .{data.constant.typeOf(data.builder).fmt(data.builder)}); - } else if (comptime std.mem.eql(u8, fmt_str, " ")) { + if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_| + @compileError("invalid format string: '" ++ fmt_str ++ "'"); + if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) { + if (data.constant == .no_init) return; + try writer.writeByte(','); + } + if (comptime std.mem.indexOfScalar(u8, fmt_str, ' ') != null) { if (data.constant == .no_init) return; try writer.writeByte(' '); } + if (comptime std.mem.indexOfScalar(u8, fmt_str, '%') != null) + try writer.print("{%} ", .{data.constant.typeOf(data.builder).fmt(data.builder)}); 
assert(data.constant != .no_init); if (std.enums.tagName(Constant, data.constant)) |name| return writer.writeAll(name); switch (data.constant.unwrap()) { @@ -1770,10 +4605,10 @@ pub const Constant = enum(u32) { .dso_local_equivalent, .no_cfi, => |tag| { - const extra = data.builder.constantExtraData(FunctionReference, item.data); + const function: Function.Index = @enumFromInt(item.data); try writer.print("{s} {}", .{ @tagName(tag), - extra.function.ptrConst(data.builder).global.fmt(data.builder), + function.ptrConst(data.builder).global.fmt(data.builder), }); }, .trunc, @@ -1793,7 +4628,7 @@ pub const Constant = enum(u32) { const extra = data.builder.constantExtraData(Cast, item.data); try writer.print("{s} ({%} to {%})", .{ @tagName(tag), - extra.arg.fmt(data.builder), + extra.val.fmt(data.builder), extra.type.fmt(data.builder), }); }, @@ -1802,7 +4637,7 @@ pub const Constant = enum(u32) { => |tag| { const extra = data.builder.constantExtraDataTrail(GetElementPtr, item.data); const indices: []const Constant = @ptrCast(data.builder.constant_extra - .items[extra.end..][0..extra.data.indices_len]); + .items[extra.end..][0..extra.data.info.indices_len]); try writer.print("{s} ({%}, {%}", .{ @tagName(tag), extra.data.type.fmt(data.builder), @@ -1830,7 +4665,7 @@ pub const Constant = enum(u32) { const extra = data.builder.constantExtraData(ExtractElement, item.data); try writer.print("{s} ({%}, {%})", .{ @tagName(tag), - extra.arg.fmt(data.builder), + extra.val.fmt(data.builder), extra.index.fmt(data.builder), }); }, @@ -1838,7 +4673,7 @@ pub const Constant = enum(u32) { const extra = data.builder.constantExtraData(InsertElement, item.data); try writer.print("{s} ({%}, {%}, {%})", .{ @tagName(tag), - extra.arg.fmt(data.builder), + extra.val.fmt(data.builder), extra.elem.fmt(data.builder), extra.index.fmt(data.builder), }); @@ -1894,6 +4729,7 @@ pub const Constant = enum(u32) { }; pub const Value = enum(u32) { + none = std.math.maxInt(u31), _, const first_constant: Value = @enumFromInt(1 << 31); @@ -1903,10 +4739,65 @@ pub const Value = enum(u32) { constant: Constant, } { return if (@intFromEnum(self) < @intFromEnum(first_constant)) - .{ .instruction = @intFromEnum(self) } + .{ .instruction = @enumFromInt(@intFromEnum(self)) } else .{ .constant = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_constant)) }; } + + pub fn typeOfWip(self: Value, wip: *const WipFunction) Type { + return switch (self.unwrap()) { + .instruction => |instruction| instruction.typeOfWip(wip), + .constant => |constant| constant.typeOf(wip.builder), + }; + } + + pub fn typeOf(self: Value, function: Function.Index, builder: *Builder) Type { + return switch (self.unwrap()) { + .instruction => |instruction| instruction.typeOf(function, builder), + .constant => |constant| constant.typeOf(builder), + }; + } + + pub fn toConst(self: Value) ?Constant { + return switch (self.unwrap()) { + .instruction => null, + .constant => |constant| constant, + }; + } + + const FormatData = struct { + value: Value, + function: Function.Index, + builder: *Builder, + }; + fn format( + data: FormatData, + comptime fmt_str: []const u8, + fmt_opts: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + switch (data.value.unwrap()) { + .instruction => |instruction| try Function.Instruction.Index.format(.{ + .instruction = instruction, + .function = data.function, + .builder = data.builder, + }, fmt_str, fmt_opts, writer), + .constant => |constant| try Constant.format(.{ + .constant = constant, + .builder = data.builder, + 
}, fmt_str, fmt_opts, writer), + } + } + pub fn fmt(self: Value, function: Function.Index, builder: *Builder) std.fmt.Formatter(format) { + return .{ .data = .{ .value = self, .function = function, .builder = builder } }; + } + + pub fn toLlvm(self: Value, wip: *const WipFunction) *llvm.Value { + return switch (self.unwrap()) { + .instruction => |instruction| instruction.toLlvm(wip), + .constant => |constant| constant.toLlvm(wip.builder), + }; + } }; pub const Metadata = enum(u32) { _ }; @@ -2297,12 +5188,12 @@ pub fn fnType( } pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type { - try self.ensureUnusedTypeCapacity(1, null, 0); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); return self.intTypeAssumeCapacity(bits); } pub fn ptrType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type { - try self.ensureUnusedTypeCapacity(1, null, 0); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); return self.ptrTypeAssumeCapacity(addr_space); } @@ -2376,7 +5267,7 @@ pub fn namedTypeSetBody( pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index { assert(!name.isAnon()); - try self.ensureUnusedTypeCapacity(1, null, 0); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); try self.ensureUnusedGlobalCapacity(name); return self.addGlobalAssumeCapacity(name, global); } @@ -2422,6 +5313,10 @@ pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Consta return self.bigIntConst(ty, std.math.big.int.Mutable.init(&limbs, value).toConst()); } +pub fn intValue(self: *Builder, ty: Type, value: anytype) Allocator.Error!Value { + return (try self.intConst(ty, value)).toValue(); +} + pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allocator.Error!Constant { try self.constant_map.ensureUnusedCapacity(self.gpa, 1); try self.constant_items.ensureUnusedCapacity(self.gpa, 1); @@ -2430,6 +5325,10 @@ pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allo return self.bigIntConstAssumeCapacity(ty, value); } +pub fn bigIntValue(self: *Builder, ty: Type, value: std.math.big.int.Const) Allocator.Error!Value { + return (try self.bigIntConst(ty, value)).toValue(); +} + pub fn fpConst(self: *Builder, ty: Type, comptime val: comptime_float) Allocator.Error!Constant { return switch (ty) { .half => try self.halfConst(val), @@ -2438,88 +5337,169 @@ pub fn fpConst(self: *Builder, ty: Type, comptime val: comptime_float) Allocator .double => try self.doubleConst(val), .fp128 => try self.fp128Const(val), .x86_fp80 => try self.x86_fp80Const(val), - .ppc_fp128 => try self.ppc_fp128Const(.{ val, 0 }), + .ppc_fp128 => try self.ppc_fp128Const(.{ val, -0.0 }), + else => unreachable, + }; +} + +pub fn fpValue(self: *Builder, ty: Type, comptime value: comptime_float) Allocator.Error!Value { + return (try self.fpConst(ty, value)).toValue(); +} + +pub fn nanConst(self: *Builder, ty: Type) Allocator.Error!Constant { + return switch (ty) { + .half => try self.halfConst(std.math.nan(f16)), + .bfloat => try self.bfloatConst(std.math.nan(f32)), + .float => try self.floatConst(std.math.nan(f32)), + .double => try self.doubleConst(std.math.nan(f64)), + .fp128 => try self.fp128Const(std.math.nan(f128)), + .x86_fp80 => try self.x86_fp80Const(std.math.nan(f80)), + .ppc_fp128 => try self.ppc_fp128Const(.{std.math.nan(f64)} ** 2), else => unreachable, }; } +pub fn nanValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.nanConst(ty)).toValue(); +} + pub fn halfConst(self: *Builder, val: f16) 
Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.halfConstAssumeCapacity(val); } +pub fn halfValue(self: *Builder, ty: Type, value: f16) Allocator.Error!Value { + return (try self.halfConst(value)).toValue(); +} + pub fn bfloatConst(self: *Builder, val: f32) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.bfloatConstAssumeCapacity(val); } +pub fn bfloatValue(self: *Builder, ty: Type, value: f32) Allocator.Error!Value { + return (try self.bfloatConst(value)).toValue(); +} + pub fn floatConst(self: *Builder, val: f32) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.floatConstAssumeCapacity(val); } +pub fn floatValue(self: *Builder, ty: Type, value: f32) Allocator.Error!Value { + return (try self.floatConst(value)).toValue(); +} + pub fn doubleConst(self: *Builder, val: f64) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Double, 0); return self.doubleConstAssumeCapacity(val); } +pub fn doubleValue(self: *Builder, ty: Type, value: f64) Allocator.Error!Value { + return (try self.doubleConst(value)).toValue(); +} + pub fn fp128Const(self: *Builder, val: f128) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); return self.fp128ConstAssumeCapacity(val); } +pub fn fp128Value(self: *Builder, ty: Type, value: f128) Allocator.Error!Value { + return (try self.fp128Const(value)).toValue(); +} + pub fn x86_fp80Const(self: *Builder, val: f80) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Fp80, 0); return self.x86_fp80ConstAssumeCapacity(val); } +pub fn x86_fp80Value(self: *Builder, ty: Type, value: f80) Allocator.Error!Value { + return (try self.x86_fp80Const(value)).toValue(); +} + pub fn ppc_fp128Const(self: *Builder, val: [2]f64) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); return self.ppc_fp128ConstAssumeCapacity(val); } +pub fn ppc_fp128Value(self: *Builder, ty: Type, value: [2]f64) Allocator.Error!Value { + return (try self.ppc_fp128Const(value)).toValue(); +} + pub fn nullConst(self: *Builder, ty: Type) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.nullConstAssumeCapacity(ty); } +pub fn nullValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.nullConst(ty)).toValue(); +} + pub fn noneConst(self: *Builder, ty: Type) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.noneConstAssumeCapacity(ty); } +pub fn noneValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.noneConst(ty)).toValue(); +} + pub fn structConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len); return self.structConstAssumeCapacity(ty, vals); } +pub fn structValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value { + return (try self.structConst(ty, vals)).toValue(); +} + pub fn arrayConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, 
Constant.Aggregate, vals.len); return self.arrayConstAssumeCapacity(ty, vals); } +pub fn arrayValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value { + return (try self.arrayConst(ty, vals)).toValue(); +} + pub fn stringConst(self: *Builder, val: String) Allocator.Error!Constant { try self.ensureUnusedTypeCapacity(1, Type.Array, 0); - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.stringConstAssumeCapacity(val); } +pub fn stringValue(self: *Builder, val: String) Allocator.Error!Value { + return (try self.stringConst(val)).toValue(); +} + pub fn stringNullConst(self: *Builder, val: String) Allocator.Error!Constant { try self.ensureUnusedTypeCapacity(1, Type.Array, 0); - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.stringNullConstAssumeCapacity(val); } +pub fn stringNullValue(self: *Builder, val: String) Allocator.Error!Value { + return (try self.stringNullConst(val)).toValue(); +} + pub fn vectorConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len); return self.vectorConstAssumeCapacity(ty, vals); } +pub fn vectorValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value { + return (try self.vectorConst(ty, vals)).toValue(); +} + pub fn splatConst(self: *Builder, ty: Type, val: Constant) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Splat, 0); return self.splatConstAssumeCapacity(ty, val); } +pub fn splatValue(self: *Builder, ty: Type, val: Constant) Allocator.Error!Value { + return (try self.splatConst(ty, val)).toValue(); +} + pub fn zeroInitConst(self: *Builder, ty: Type) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0); try self.constant_limbs.ensureUnusedCapacity( @@ -2529,16 +5509,28 @@ pub fn zeroInitConst(self: *Builder, ty: Type) Allocator.Error!Constant { return self.zeroInitConstAssumeCapacity(ty); } +pub fn zeroInitValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.zeroInitConst(ty)).toValue(); +} + pub fn undefConst(self: *Builder, ty: Type) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.undefConstAssumeCapacity(ty); } +pub fn undefValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.undefConst(ty)).toValue(); +} + pub fn poisonConst(self: *Builder, ty: Type) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, null, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.poisonConstAssumeCapacity(ty); } +pub fn poisonValue(self: *Builder, ty: Type) Allocator.Error!Value { + return (try self.poisonConst(ty)).toValue(); +} + pub fn blockAddrConst( self: *Builder, function: Function.Index, @@ -2548,29 +5540,58 @@ pub fn blockAddrConst( return self.blockAddrConstAssumeCapacity(function, block); } +pub fn blockAddrValue( + self: *Builder, + function: Function.Index, + block: Function.Block.Index, +) Allocator.Error!Value { + return (try self.blockAddrConst(function, block)).toValue(); +} + pub fn dsoLocalEquivalentConst(self: *Builder, function: Function.Index) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, Constant.FunctionReference, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return 
self.dsoLocalEquivalentConstAssumeCapacity(function); } +pub fn dsoLocalEquivalentValue(self: *Builder, function: Function.Index) Allocator.Error!Value { + return (try self.dsoLocalEquivalentConst(function)).toValue(); +} + pub fn noCfiConst(self: *Builder, function: Function.Index) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, Constant.FunctionReference, 0); + try self.ensureUnusedConstantCapacity(1, NoExtra, 0); return self.noCfiConstAssumeCapacity(function); } +pub fn noCfiValue(self: *Builder, function: Function.Index) Allocator.Error!Value { + return (try self.noCfiConst(function)).toValue(); +} + pub fn convConst( self: *Builder, signedness: Constant.Cast.Signedness, - arg: Constant, + val: Constant, ty: Type, ) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0); - return self.convConstAssumeCapacity(signedness, arg, ty); + return self.convConstAssumeCapacity(signedness, val, ty); +} + +pub fn convValue( + self: *Builder, + signedness: Constant.Cast.Signedness, + val: Constant, + ty: Type, +) Allocator.Error!Value { + return (try self.convConst(signedness, val, ty)).toValue(); } -pub fn castConst(self: *Builder, tag: Constant.Tag, arg: Constant, ty: Type) Allocator.Error!Constant { +pub fn castConst(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0); - return self.castConstAssumeCapacity(tag, arg, ty); + return self.castConstAssumeCapacity(tag, val, ty); +} + +pub fn castValue(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Allocator.Error!Value { + return (try self.castConst(tag, val, ty)).toValue(); } pub fn gepConst( @@ -2578,11 +5599,23 @@ pub fn gepConst( comptime kind: Constant.GetElementPtr.Kind, ty: Type, base: Constant, + inrange: ?u16, indices: []const Constant, ) Allocator.Error!Constant { try self.ensureUnusedTypeCapacity(1, Type.Vector, 0); try self.ensureUnusedConstantCapacity(1, Constant.GetElementPtr, indices.len); - return self.gepConstAssumeCapacity(kind, ty, base, indices); + return self.gepConstAssumeCapacity(kind, ty, base, inrange, indices); +} + +pub fn gepValue( + self: *Builder, + comptime kind: Constant.GetElementPtr.Kind, + ty: Type, + base: Constant, + inrange: ?u16, + indices: []const Constant, +) Allocator.Error!Value { + return (try self.gepConst(kind, ty, base, inrange, indices)).toValue(); } pub fn icmpConst( @@ -2595,6 +5628,15 @@ pub fn icmpConst( return self.icmpConstAssumeCapacity(cond, lhs, rhs); } +pub fn icmpValue( + self: *Builder, + cond: IntegerCondition, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Value { + return (try self.icmpConst(cond, lhs, rhs)).toValue(); +} + pub fn fcmpConst( self: *Builder, cond: FloatCondition, @@ -2605,19 +5647,41 @@ pub fn fcmpConst( return self.icmpConstAssumeCapacity(cond, lhs, rhs); } -pub fn extractElementConst(self: *Builder, arg: Constant, index: Constant) Allocator.Error!Constant { +pub fn fcmpValue( + self: *Builder, + cond: FloatCondition, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Value { + return (try self.fcmpConst(cond, lhs, rhs)).toValue(); +} + +pub fn extractElementConst(self: *Builder, val: Constant, index: Constant) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.ExtractElement, 0); - return self.extractElementConstAssumeCapacity(arg, index); + return self.extractElementConstAssumeCapacity(val, index); +} + +pub fn extractElementValue(self: *Builder, val: Constant, index: Constant) 
Allocator.Error!Value { + return (try self.extractElementConst(val, index)).toValue(); } pub fn insertElementConst( self: *Builder, - arg: Constant, + val: Constant, elem: Constant, index: Constant, ) Allocator.Error!Constant { try self.ensureUnusedConstantCapacity(1, Constant.InsertElement, 0); - return self.insertElementConstAssumeCapacity(arg, elem, index); + return self.insertElementConstAssumeCapacity(val, elem, index); +} + +pub fn insertElementValue( + self: *Builder, + val: Constant, + elem: Constant, + index: Constant, +) Allocator.Error!Value { + return (try self.insertElementConst(val, elem, index)).toValue(); } pub fn shuffleVectorConst( @@ -2626,10 +5690,20 @@ pub fn shuffleVectorConst( rhs: Constant, mask: Constant, ) Allocator.Error!Constant { + try self.ensureUnusedTypeCapacity(1, Type.Array, 0); try self.ensureUnusedConstantCapacity(1, Constant.ShuffleVector, 0); return self.shuffleVectorConstAssumeCapacity(lhs, rhs, mask); } +pub fn shuffleVectorValue( + self: *Builder, + lhs: Constant, + rhs: Constant, + mask: Constant, +) Allocator.Error!Value { + return (try self.shuffleVectorConst(lhs, rhs, mask)).toValue(); +} + pub fn binConst( self: *Builder, tag: Constant.Tag, @@ -2640,6 +5714,10 @@ pub fn binConst( return self.binConstAssumeCapacity(tag, lhs, rhs); } +pub fn binValue(self: *Builder, tag: Constant.Tag, lhs: Constant, rhs: Constant) Allocator.Error!Value { + return (try self.binConst(tag, lhs, rhs)).toValue(); +} + pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void { if (self.source_filename != .none) try writer.print( \\; ModuleID = '{s}' @@ -2679,17 +5757,15 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator @tagName(variable.mutability), global.type.fmt(self), variable.init.fmt(self), - global.alignment, + variable.alignment, }); } try writer.writeByte('\n'); - for (self.functions.items) |function| { + for (0.., self.functions.items) |function_i, function| { + const function_index: Function.Index = @enumFromInt(function_i); if (function.global.getReplacement(self) != .none) continue; const global = function.global.ptrConst(self); - const item = self.type_items.items[@intFromEnum(global.type)]; - const extra = self.typeExtraDataTrail(Type.Function, item.data); - const params: []const Type = - @ptrCast(self.type_extra.items[extra.end..][0..extra.data.params_len]); + const params_len = global.type.functionParameters(self).len; try writer.print( \\{s}{}{}{}{} {} {}( , .{ @@ -2698,31 +5774,398 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator global.preemption, global.visibility, global.dll_storage_class, - extra.data.ret.fmt(self), + global.type.functionReturn(self).fmt(self), function.global.fmt(self), }); - for (params, 0..) 
|param, index| { - if (index > 0) try writer.writeAll(", "); - try writer.print("{%} %{d}", .{ param.fmt(self), index }); + for (0..params_len) |arg| { + if (arg > 0) try writer.writeAll(", "); + try writer.print("{%}", .{function.arg(@intCast(arg)).fmt(function_index, self)}); } - switch (item.tag) { - .function => {}, - .vararg_function => { - if (params.len > 0) try writer.writeAll(", "); + switch (global.type.functionKind(self)) { + .normal => {}, + .vararg => { + if (params_len > 0) try writer.writeAll(", "); try writer.writeAll("..."); }, - else => unreachable, } - try writer.print("){}{}", .{ global.unnamed_addr, global.alignment }); + try writer.print("){}{}", .{ global.unnamed_addr, function.alignment }); if (function.instructions.len > 0) { try writer.writeAll(" {\n"); - for (0..function.instructions.len) |index| { - const instruction_index: Function.Instruction.Index = @enumFromInt(index); - const instruction = function.instructions.get(index); + for (params_len..function.instructions.len) |instruction_i| { + const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i); + const instruction = function.instructions.get(@intFromEnum(instruction_index)); switch (instruction.tag) { - .block => try writer.print("{}:\n", .{instruction_index.name(&function).fmt(self)}), - .@"ret void" => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), - else => unreachable, + .add, + .@"add nsw", + .@"add nuw", + .@"add nuw nsw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"mul nuw nsw", + .@"or", + .sdiv, + .@"sdiv exact", + .srem, + .shl, + .@"shl nsw", + .@"shl nuw", + .@"shl nuw nsw", + .sub, + .@"sub nsw", + .@"sub nuw", + .@"sub nuw nsw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => |tag| { + const extra = function.extraData(Function.Instruction.Binary, instruction.data); + try writer.print(" %{} = {s} {%}, {}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + }); + }, + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => |tag| { + const extra = function.extraData(Function.Instruction.Cast, instruction.data); + try writer.print(" %{} = {s} {%} to {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.type.fmt(self), + }); + }, + .alloca, + .@"alloca inalloca", + => |tag| { + const extra = function.extraData(Function.Instruction.Alloca, instruction.data); + try writer.print(" 
%{} = {s} {%}{,%}{,}{,}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.type.fmt(self), + extra.len.fmt(function_index, self), + extra.info.alignment, + extra.info.addr_space, + }); + }, + .arg => unreachable, + .block => { + const name = instruction_index.name(&function); + if (@intFromEnum(instruction_index) > params_len) try writer.writeByte('\n'); + try writer.print("{}:\n", .{name.fmt(self)}); + }, + .br => |tag| { + const target: Function.Block.Index = @enumFromInt(instruction.data); + try writer.print(" {s} {%}\n", .{ + @tagName(tag), target.toInst(&function).fmt(function_index, self), + }); + }, + .br_cond => { + const extra = function.extraData(Function.Instruction.BrCond, instruction.data); + try writer.print(" br {%}, {%}, {%}\n", .{ + extra.cond.fmt(function_index, self), + extra.then.toInst(&function).fmt(function_index, self), + extra.@"else".toInst(&function).fmt(function_index, self), + }); + }, + .extractelement => |tag| { + const extra = + function.extraData(Function.Instruction.ExtractElement, instruction.data); + try writer.print(" %{} = {s} {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.index.fmt(function_index, self), + }); + }, + .extractvalue => |tag| { + const extra = + function.extraDataTrail(Function.Instruction.ExtractValue, instruction.data); + const indices: []const u32 = + function.extra[extra.end..][0..extra.data.indices_len]; + try writer.print(" %{} = {s} {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.val.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {d}", .{index}); + try writer.writeByte('\n'); + }, + .fence => |tag| { + const info: MemoryAccessInfo = @bitCast(instruction.data); + try writer.print(" {s}{}{}", .{ @tagName(tag), info.scope, info.ordering }); + }, + .fneg, + .@"fneg fast", + .ret, + => |tag| { + const val: Value = @enumFromInt(instruction.data); + try writer.print(" {s} {%}\n", .{ + @tagName(tag), + val.fmt(function_index, self), + }); + }, + .getelementptr, + .@"getelementptr inbounds", + => |tag| { + const extra = function.extraDataTrail( + Function.Instruction.GetElementPtr, + instruction.data, + ); + const indices: []const Value = + @ptrCast(function.extra[extra.end..][0..extra.data.indices_len]); + try writer.print(" %{} = {s} {%}, {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.type.fmt(self), + extra.data.base.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {%}", .{ + index.fmt(function_index, self), + }); + try writer.writeByte('\n'); + }, + .insertelement => |tag| { + const extra = + function.extraData(Function.Instruction.InsertElement, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.elem.fmt(function_index, self), + extra.index.fmt(function_index, self), + }); + }, + .insertvalue => |tag| { + const extra = + function.extraDataTrail(Function.Instruction.InsertValue, instruction.data); + const indices: []const u32 = + function.extra[extra.end..][0..extra.data.indices_len]; + try writer.print(" %{} = {s} {%}, {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.val.fmt(function_index, self), + extra.data.elem.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {d}", .{index}); + try 
writer.writeByte('\n'); + }, + .@"llvm.maxnum.", + .@"llvm.minnum.", + .@"llvm.sadd.sat.", + .@"llvm.smax.", + .@"llvm.smin.", + .@"llvm.smul.fix.sat.", + .@"llvm.sshl.sat.", + .@"llvm.ssub.sat.", + .@"llvm.uadd.sat.", + .@"llvm.umax.", + .@"llvm.umin.", + .@"llvm.umul.fix.sat.", + .@"llvm.ushl.sat.", + .@"llvm.usub.sat.", + => |tag| { + const extra = function.extraData(Function.Instruction.Binary, instruction.data); + const ty = instruction_index.typeOf(function_index, self); + try writer.print(" %{} = call {%} @{s}{m}({%}, {%})\n", .{ + instruction_index.name(&function).fmt(self), + ty.fmt(self), + @tagName(tag), + ty.fmt(self), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + }); + }, + .load, + .@"load atomic", + .@"load atomic volatile", + .@"load volatile", + => |tag| { + const extra = function.extraData(Function.Instruction.Load, instruction.data); + try writer.print(" %{} = {s} {%}, {%}{}{}{,}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.type.fmt(self), + extra.ptr.fmt(function_index, self), + extra.info.scope, + extra.info.ordering, + extra.info.alignment, + }); + }, + .phi, + .@"phi fast", + => |tag| { + const extra = + function.extraDataTrail(Function.Instruction.Phi, instruction.data); + const vals: []const Value = + @ptrCast(function.extra[extra.end..][0..extra.data.incoming_len]); + const blocks: []const Function.Block.Index = @ptrCast(function.extra[extra.end + + extra.data.incoming_len ..][0..extra.data.incoming_len]); + try writer.print(" %{} = {s} {%} ", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + vals[0].typeOf(function_index, self).fmt(self), + }); + for (0.., vals, blocks) |incoming_index, incoming_val, incoming_block| { + if (incoming_index > 0) try writer.writeAll(", "); + try writer.print("[ {}, {} ]", .{ + incoming_val.fmt(function_index, self), + incoming_block.toInst(&function).fmt(function_index, self), + }); + } + try writer.writeByte('\n'); + }, + .@"ret void", + .@"unreachable", + => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), + .select, + .@"select fast", + => |tag| { + const extra = function.extraData(Function.Instruction.Select, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.cond.fmt(function_index, self), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + }); + }, + .shufflevector => |tag| { + const extra = + function.extraData(Function.Instruction.ShuffleVector, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + extra.mask.fmt(function_index, self), + }); + }, + .store, + .@"store atomic", + .@"store atomic volatile", + .@"store volatile", + => |tag| { + const extra = function.extraData(Function.Instruction.Store, instruction.data); + try writer.print(" {s} {%}, {%}{}{}{,}\n", .{ + @tagName(tag), + extra.val.fmt(function_index, self), + extra.ptr.fmt(function_index, self), + extra.info.scope, + extra.info.ordering, + extra.info.alignment, + }); + }, + .@"switch" => |tag| { + const extra = + function.extraDataTrail(Function.Instruction.Switch, instruction.data); + const vals: []const Constant = + @ptrCast(function.extra[extra.end..][0..extra.data.cases_len]); + const blocks: []const Function.Block.Index = @ptrCast(function.extra[extra.end + + extra.data.cases_len 
..][0..extra.data.cases_len]); + try writer.print(" {s} {%}, {%} [", .{ + @tagName(tag), + extra.data.val.fmt(function_index, self), + extra.data.default.toInst(&function).fmt(function_index, self), + }); + for (vals, blocks) |case_val, case_block| try writer.print(" {%}, {%}\n", .{ + case_val.fmt(self), + case_block.toInst(&function).fmt(function_index, self), + }); + try writer.writeAll(" ]\n"); + }, + .unimplemented => |tag| { + const ty: Type = @enumFromInt(instruction.data); + try writer.writeAll(" "); + switch (ty) { + .none, .void => {}, + else => try writer.print("%{} = ", .{ + instruction_index.name(&function).fmt(self), + }), + } + try writer.print("{s} {%}\n", .{ @tagName(tag), ty.fmt(self) }); + }, + .va_arg => |tag| { + const extra = function.extraData(Function.Instruction.VaArg, instruction.data); + try writer.print(" %{} = {s} {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.list.fmt(function_index, self), + extra.type.fmt(self), + }); + }, } } try writer.writeByte('}'); @@ -2731,6 +6174,12 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator } } +pub inline fn useLibLlvm(self: *const Builder) bool { + return build_options.have_llvm and self.use_lib_llvm; +} + +const NoExtra = struct {}; + fn isValidIdentifier(id: []const u8) bool { for (id, 0..) |character, index| switch (character) { '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {}, @@ -3048,15 +6497,15 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type { fn ensureUnusedTypeCapacity( self: *Builder, count: usize, - comptime Extra: ?type, + comptime Extra: type, trail_len: usize, ) Allocator.Error!void { try self.type_map.ensureUnusedCapacity(self.gpa, count); try self.type_items.ensureUnusedCapacity(self.gpa, count); - if (Extra) |E| try self.type_extra.ensureUnusedCapacity( + try self.type_extra.ensureUnusedCapacity( self.gpa, - count * (@typeInfo(E).Struct.fields.len + trail_len), - ) else assert(trail_len == 0); + count * (@typeInfo(Extra).Struct.fields.len + trail_len), + ); if (self.useLibLlvm()) try self.llvm.types.ensureUnusedCapacity(self.gpa, count); } @@ -3104,10 +6553,10 @@ fn typeExtraDataTrail( ) struct { data: T, end: Type.Item.ExtraIndex } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; - inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, data| + inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { - u32 => data, - String, Type => @enumFromInt(data), + u32 => value, + String, Type => @enumFromInt(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; @@ -3519,13 +6968,13 @@ fn arrayConstAssumeCapacity( ) if (build_options.have_llvm) Allocator.Error!Constant else Constant { const type_item = self.type_items.items[@intFromEnum(ty)]; const type_extra: struct { len: u64, child: Type } = switch (type_item.tag) { - .small_array => extra: { - const extra = self.typeExtraData(Type.Vector, type_item.data); - break :extra .{ .len = extra.len, .child = extra.child }; - }, - .array => extra: { - const extra = self.typeExtraData(Type.Array, type_item.data); - break :extra .{ .len = extra.len(), .child = extra.child }; + inline .small_array, .array => |kind| extra: { + const extra = self.typeExtraData(switch (kind) { + .small_array => Type.Vector, + .array => Type.Array, + else => unreachable, + }, 
type_item.data); + break :extra .{ .len = extra.length(), .child = extra.child }; }, else => unreachable, }; @@ -3738,7 +7187,7 @@ fn poisonConstAssumeCapacity(self: *Builder, ty: Type) Constant { .{ .tag = .poison, .data = @intFromEnum(ty) }, ); if (self.useLibLlvm() and result.new) - self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getUndef()); + self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getPoison()); return result.constant; } @@ -3794,17 +7243,17 @@ fn noCfiConstAssumeCapacity(self: *Builder, function: Function.Index) Constant { return result.constant; } -fn convConstAssumeCapacity( +fn convTag( self: *Builder, + comptime Tag: type, signedness: Constant.Cast.Signedness, - arg: Constant, + val_ty: Type, ty: Type, -) Constant { - const arg_ty = arg.typeOf(self); - if (arg_ty == ty) return arg; - return self.castConstAssumeCapacity(switch (arg_ty.scalarTag(self)) { +) Tag { + assert(val_ty != ty); + return switch (val_ty.scalarTag(self)) { .simple => switch (ty.scalarTag(self)) { - .simple => switch (std.math.order(arg_ty.scalarBits(self), ty.scalarBits(self))) { + .simple => switch (std.math.order(val_ty.scalarBits(self), ty.scalarBits(self))) { .lt => .fpext, .eq => unreachable, .gt => .fptrunc, @@ -3816,13 +7265,13 @@ fn convConstAssumeCapacity( }, else => unreachable, }, - .integer => switch (ty.tag(self)) { + .integer => switch (ty.scalarTag(self)) { .simple => switch (signedness) { .unsigned => .uitofp, .signed => .sitofp, .unneeded => unreachable, }, - .integer => switch (std.math.order(arg_ty.scalarBits(self), ty.scalarBits(self))) { + .integer => switch (std.math.order(val_ty.scalarBits(self), ty.scalarBits(self))) { .lt => switch (signedness) { .unsigned => .zext, .signed => .sext, @@ -3834,16 +7283,27 @@ fn convConstAssumeCapacity( .pointer => .inttoptr, else => unreachable, }, - .pointer => switch (ty.tag(self)) { + .pointer => switch (ty.scalarTag(self)) { .integer => .ptrtoint, .pointer => .addrspacecast, else => unreachable, }, else => unreachable, - }, arg, ty); + }; +} + +fn convConstAssumeCapacity( + self: *Builder, + signedness: Constant.Cast.Signedness, + val: Constant, + ty: Type, +) Constant { + const val_ty = val.typeOf(self); + if (val_ty == ty) return val; + return self.castConstAssumeCapacity(self.convTag(Constant.Tag, signedness, val_ty, ty), val, ty); } -fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, arg: Constant, ty: Type) Constant { +fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Constant { const Key = struct { tag: Constant.Tag, cast: Constant.Cast }; const Adapter = struct { builder: *const Builder, @@ -3860,7 +7320,7 @@ fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, arg: Constant, ty: return std.meta.eql(lhs_key.cast, rhs_extra); } }; - const data = Key{ .tag = tag, .cast = .{ .arg = arg, .type = ty } }; + const data = Key{ .tag = tag, .cast = .{ .val = val, .type = ty } }; const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); if (!gop.found_existing) { gop.key_ptr.* = {}; @@ -3883,7 +7343,7 @@ fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, arg: Constant, ty: .inttoptr => &llvm.Value.constIntToPtr, .bitcast => &llvm.Value.constBitCast, else => unreachable, - }(arg.toLlvm(self), ty.toLlvm(self))); + }(val.toLlvm(self), ty.toLlvm(self))); } return @enumFromInt(gop.index); } @@ -3893,6 +7353,7 @@ fn gepConstAssumeCapacity( comptime kind: Constant.GetElementPtr.Kind, ty: Type, base: Constant, + inrange: ?u16, 
indices: []const Constant, ) if (build_options.have_llvm) Allocator.Error!Constant else Constant { const tag: Constant.Tag = switch (kind) { @@ -3929,13 +7390,19 @@ fn gepConstAssumeCapacity( inline else => |vector_kind| _ = self.vectorTypeAssumeCapacity(vector_kind, info.len, base_ty), }; - const Key = struct { type: Type, base: Constant, indices: []const Constant }; + const Key = struct { + type: Type, + base: Constant, + inrange: Constant.GetElementPtr.InRangeIndex, + indices: []const Constant, + }; const Adapter = struct { builder: *const Builder, pub fn hash(_: @This(), key: Key) u32 { var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag))); hasher.update(std.mem.asBytes(&key.type)); hasher.update(std.mem.asBytes(&key.base)); + hasher.update(std.mem.asBytes(&key.inrange)); hasher.update(std.mem.sliceAsBytes(key.indices)); return @truncate(hasher.final()); } @@ -3944,12 +7411,18 @@ fn gepConstAssumeCapacity( const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; const rhs_extra = ctx.builder.constantExtraDataTrail(Constant.GetElementPtr, rhs_data); const rhs_indices: []const Constant = @ptrCast(ctx.builder.constant_extra - .items[rhs_extra.end..][0..rhs_extra.data.indices_len]); + .items[rhs_extra.end..][0..rhs_extra.data.info.indices_len]); return lhs_key.type == rhs_extra.data.type and lhs_key.base == rhs_extra.data.base and + lhs_key.inrange == rhs_extra.data.info.inrange and std.mem.eql(Constant, lhs_key.indices, rhs_indices); } }; - const data = Key{ .type = ty, .base = base, .indices = indices }; + const data = Key{ + .type = ty, + .base = base, + .inrange = if (inrange) |index| @enumFromInt(index) else .none, + .indices = indices, + }; const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); if (!gop.found_existing) { gop.key_ptr.* = {}; @@ -3959,7 +7432,7 @@ fn gepConstAssumeCapacity( .data = self.addConstantExtraAssumeCapacity(Constant.GetElementPtr{ .type = ty, .base = base, - .indices_len = @intCast(indices.len), + .info = .{ .indices_len = @intCast(indices.len), .inrange = data.inrange }, }), }); self.constant_extra.appendSliceAssumeCapacity(@ptrCast(indices)); @@ -3976,7 +7449,7 @@ fn gepConstAssumeCapacity( self.llvm.constants.appendAssumeCapacity(switch (kind) { .normal => &llvm.Type.constGEP, .inbounds => &llvm.Type.constInBoundsGEP, - }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(indices.len))); + }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(llvm_indices.len))); } } return @enumFromInt(gop.index); @@ -4058,7 +7531,7 @@ fn fcmpConstAssumeCapacity( fn extractElementConstAssumeCapacity( self: *Builder, - arg: Constant, + val: Constant, index: Constant, ) Constant { const Adapter = struct { @@ -4076,7 +7549,7 @@ fn extractElementConstAssumeCapacity( return std.meta.eql(lhs_key, rhs_extra); } }; - const data = Constant.ExtractElement{ .arg = arg, .index = index }; + const data = Constant.ExtractElement{ .val = val, .index = index }; const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); if (!gop.found_existing) { gop.key_ptr.* = {}; @@ -4086,7 +7559,7 @@ fn extractElementConstAssumeCapacity( .data = self.addConstantExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( - arg.toLlvm(self).constExtractElement(index.toLlvm(self)), + val.toLlvm(self).constExtractElement(index.toLlvm(self)), ); } return @enumFromInt(gop.index); @@ -4094,7 +7567,7 @@ fn extractElementConstAssumeCapacity( 
fn insertElementConstAssumeCapacity( self: *Builder, - arg: Constant, + val: Constant, elem: Constant, index: Constant, ) Constant { @@ -4113,7 +7586,7 @@ fn insertElementConstAssumeCapacity( return std.meta.eql(lhs_key, rhs_extra); } }; - const data = Constant.InsertElement{ .arg = arg, .elem = elem, .index = index }; + const data = Constant.InsertElement{ .val = val, .elem = elem, .index = index }; const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); if (!gop.found_existing) { gop.key_ptr.* = {}; @@ -4123,7 +7596,7 @@ fn insertElementConstAssumeCapacity( .data = self.addConstantExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( - arg.toLlvm(self).constInsertElement(elem.toLlvm(self), index.toLlvm(self)), + val.toLlvm(self).constInsertElement(elem.toLlvm(self), index.toLlvm(self)), ); } return @enumFromInt(gop.index); @@ -4135,6 +7608,10 @@ fn shuffleVectorConstAssumeCapacity( rhs: Constant, mask: Constant, ) Constant { + assert(lhs.typeOf(self).isVector(self.builder)); + assert(lhs.typeOf(self) == rhs.typeOf(self)); + assert(mask.typeOf(self).scalarType(self).isInteger(self)); + _ = lhs.typeOf(self).changeLengthAssumeCapacity(mask.typeOf(self).vectorLen(self), self); const Adapter = struct { builder: *const Builder, pub fn hash(_: @This(), key: Constant.ShuffleVector) u32 { @@ -4235,15 +7712,15 @@ fn binConstAssumeCapacity( fn ensureUnusedConstantCapacity( self: *Builder, count: usize, - comptime Extra: ?type, + comptime Extra: type, trail_len: usize, ) Allocator.Error!void { try self.constant_map.ensureUnusedCapacity(self.gpa, count); try self.constant_items.ensureUnusedCapacity(self.gpa, count); - if (Extra) |E| try self.constant_extra.ensureUnusedCapacity( + try self.constant_extra.ensureUnusedCapacity( self.gpa, - count * (@typeInfo(E).Struct.fields.len + trail_len), - ) else assert(trail_len == 0); + count * (@typeInfo(Extra).Struct.fields.len + trail_len), + ); if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, count); } @@ -4323,11 +7800,8 @@ fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item. 
const value = @field(extra, field.name); self.constant_extra.appendAssumeCapacity(switch (field.type) { u32 => value, - Type, - Constant, - Function.Index, - Function.Block.Index, - => @intFromEnum(value), + Type, Constant, Function.Index, Function.Block.Index => @intFromEnum(value), + Constant.GetElementPtr.Info => @bitCast(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4341,14 +7815,11 @@ fn constantExtraDataTrail( ) struct { data: T, end: Constant.Item.ExtraIndex } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; - inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, data| + inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { - u32 => data, - Type, - Constant, - Function.Index, - Function.Block.Index, - => @enumFromInt(data), + u32 => value, + Type, Constant, Function.Index, Function.Block.Index => @enumFromInt(value), + Constant.GetElementPtr.Info => @bitCast(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; return .{ .data = result, .end = index + @as(Constant.Item.ExtraIndex, @intCast(fields.len)) }; @@ -4358,10 +7829,6 @@ fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Ite return self.constantExtraDataTrail(T, index).data; } -pub inline fn useLibLlvm(self: *const Builder) bool { - return build_options.have_llvm and self.use_lib_llvm; -} - const assert = std.debug.assert; const build_options = @import("build_options"); const builtin = @import("builtin"); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index adc6223830..3b99ae1fe1 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -135,9 +135,6 @@ pub const Value = opaque { pub const getNextInstruction = LLVMGetNextInstruction; extern fn LLVMGetNextInstruction(Inst: *Value) ?*Value; - pub const typeOf = LLVMTypeOf; - extern fn LLVMTypeOf(Val: *Value) *Type; - pub const setGlobalConstant = LLVMSetGlobalConstant; extern fn LLVMSetGlobalConstant(GlobalVar: *Value, IsConstant: Bool) void; @@ -291,6 +288,9 @@ pub const Value = opaque { MaskConstant: *Value, ) *Value; + pub const isConstant = LLVMIsConstant; + extern fn LLVMIsConstant(Val: *Value) Bool; + pub const blockAddress = LLVMBlockAddress; extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value; @@ -303,6 +303,9 @@ pub const Value = opaque { pub const setVolatile = LLVMSetVolatile; extern fn LLVMSetVolatile(MemoryAccessInst: *Value, IsVolatile: Bool) void; + pub const setAtomicSingleThread = LLVMSetAtomicSingleThread; + extern fn LLVMSetAtomicSingleThread(AtomicInst: *Value, SingleThread: Bool) void; + pub const setAlignment = LLVMSetAlignment; extern fn LLVMSetAlignment(V: *Value, Bytes: c_uint) void; @@ -348,17 +351,9 @@ pub const Value = opaque { pub const addCase = LLVMAddCase; extern fn LLVMAddCase(Switch: *Value, OnVal: *Value, Dest: *BasicBlock) void; - pub inline fn isPoison(Val: *Value) bool { - return LLVMIsPoison(Val).toBool(); - } - extern fn LLVMIsPoison(Val: *Value) Bool; - pub const replaceAllUsesWith = LLVMReplaceAllUsesWith; extern fn LLVMReplaceAllUsesWith(OldVal: *Value, NewVal: *Value) void; - pub const globalGetValueType = LLVMGlobalGetValueType; - extern fn LLVMGlobalGetValueType(Global: *Value) *Type; - pub const getLinkage = LLVMGetLinkage; extern fn LLVMGetLinkage(Global: *Value) Linkage; @@ -410,6 +405,9 @@ pub const Type = opaque { pub const getUndef = LLVMGetUndef; 
extern fn LLVMGetUndef(Ty: *Type) *Value; + pub const getPoison = LLVMGetPoison; + extern fn LLVMGetPoison(Ty: *Type) *Value; + pub const arrayType = LLVMArrayType; extern fn LLVMArrayType(ElementType: *Type, ElementCount: c_uint) *Type; @@ -427,24 +425,6 @@ pub const Type = opaque { Packed: Bool, ) void; - pub const structGetTypeAtIndex = LLVMStructGetTypeAtIndex; - extern fn LLVMStructGetTypeAtIndex(StructTy: *Type, i: c_uint) *Type; - - pub const getTypeKind = LLVMGetTypeKind; - extern fn LLVMGetTypeKind(Ty: *Type) TypeKind; - - pub const getElementType = LLVMGetElementType; - extern fn LLVMGetElementType(Ty: *Type) *Type; - - pub const countStructElementTypes = LLVMCountStructElementTypes; - extern fn LLVMCountStructElementTypes(StructTy: *Type) c_uint; - - pub const isOpaqueStruct = LLVMIsOpaqueStruct; - extern fn LLVMIsOpaqueStruct(StructTy: *Type) Bool; - - pub const isSized = LLVMTypeIsSized; - extern fn LLVMTypeIsSized(Ty: *Type) Bool; - pub const constGEP = LLVMConstGEP2; extern fn LLVMConstGEP2( Ty: *Type, @@ -815,6 +795,16 @@ pub const Builder = opaque { pub const buildBitCast = LLVMBuildBitCast; extern fn LLVMBuildBitCast(*Builder, Val: *Value, DestTy: *Type, Name: [*:0]const u8) *Value; + pub const buildGEP = LLVMBuildGEP2; + extern fn LLVMBuildGEP2( + B: *Builder, + Ty: *Type, + Pointer: *Value, + Indices: [*]const *Value, + NumIndices: c_uint, + Name: [*:0]const u8, + ) *Value; + pub const buildInBoundsGEP = LLVMBuildInBoundsGEP2; extern fn LLVMBuildInBoundsGEP2( B: *Builder, @@ -868,14 +858,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildVectorSplat = LLVMBuildVectorSplat; - extern fn LLVMBuildVectorSplat( - *Builder, - ElementCount: c_uint, - EltVal: *Value, - Name: [*:0]const u8, - ) *Value; - pub const buildPtrToInt = LLVMBuildPtrToInt; extern fn LLVMBuildPtrToInt( *Builder, @@ -892,15 +874,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildStructGEP = LLVMBuildStructGEP2; - extern fn LLVMBuildStructGEP2( - B: *Builder, - Ty: *Type, - Pointer: *Value, - Idx: c_uint, - Name: [*:0]const u8, - ) *Value; - pub const buildTrunc = LLVMBuildTrunc; extern fn LLVMBuildTrunc( *Builder, @@ -1156,9 +1129,6 @@ pub const RealPredicate = enum(c_uint) { pub const BasicBlock = opaque { pub const deleteBasicBlock = LLVMDeleteBasicBlock; extern fn LLVMDeleteBasicBlock(BB: *BasicBlock) void; - - pub const getFirstInstruction = LLVMGetFirstInstruction; - extern fn LLVMGetFirstInstruction(BB: *BasicBlock) ?*Value; }; pub const TargetMachine = opaque { @@ -1580,29 +1550,6 @@ pub const AtomicRMWBinOp = enum(c_int) { FMin, }; -pub const TypeKind = enum(c_int) { - Void, - Half, - Float, - Double, - X86_FP80, - FP128, - PPC_FP128, - Label, - Integer, - Function, - Struct, - Array, - Pointer, - Vector, - Metadata, - X86_MMX, - Token, - ScalableVector, - BFloat, - X86_AMX, -}; - pub const CallConv = enum(c_uint) { C = 0, Fast = 8, @@ -1729,7 +1676,7 @@ pub const address_space = struct { pub const constant_buffer_15: c_uint = 23; }; - // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h + // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypetilities.h pub const wasm = struct { pub const variable: c_uint = 1; pub const externref: c_uint = 10; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index b04356bf4d..26ea04aca6 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -560,10 +560,6 @@ LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRe return wrap(call_inst); } -LLVMValueRef 
LLVMBuildVectorSplat(LLVMBuilderRef B, unsigned elem_count, LLVMValueRef V, const char *Name) { - return wrap(unwrap(B)->CreateVectorSplat(elem_count, unwrap(V), Name)); -} - void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) { assert( isa(unwrap(fn)) ); Function *unwrapped_function = reinterpret_cast(unwrap(fn)); -- cgit v1.2.3 From 9c4d5e64b45001cd82c0db53d1a489f433024223 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 17 Jul 2023 20:17:43 -0400 Subject: llvm: minor fixes --- src/codegen/llvm.zig | 2 +- src/codegen/llvm/Builder.zig | 2 +- src/main.zig | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 7fe5dbb88f..8c5660f8c5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -8,7 +8,7 @@ const native_endian = builtin.cpu.arch.endian(); const DW = std.dwarf; const Builder = @import("llvm/Builder.zig"); -const llvm = if (build_options.have_llvm) +const llvm = if (build_options.have_llvm or true) @import("llvm/bindings.zig") else @compileError("LLVM unavailable"); diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 67bafc0113..1ff0ea6976 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -6644,7 +6644,7 @@ fn bigIntConstAssumeCapacity( } else |_| { const llvm_limbs = try allocator.alloc(u64, std.math.divCeil( usize, - canonical_value.bitCountTwosComp(), + if (canonical_value.positive) canonical_value.bitCountAbs() else bits, @bitSizeOf(u64), ) catch unreachable); defer allocator.free(llvm_limbs); diff --git a/src/main.zig b/src/main.zig index 59655eadb6..134b566bdc 100644 --- a/src/main.zig +++ b/src/main.zig @@ -439,8 +439,8 @@ const usage_build_generic = \\ -fno-unwind-tables Never produce unwind table entries \\ -fLLVM Force using LLVM as the codegen backend \\ -fno-LLVM Prevent using LLVM as the codegen backend - \\ -flibLLVM Force using LLVM shared library apias the codegen backend - \\ -fno-libLLVM Prevent using LLVM as the codegen backend + \\ -flibLLVM Force using the LLVM API in the codegen backend + \\ -fno-libLLVM Prevent using the LLVM API in the codegen backend \\ -fClang Force using Clang as the C/C++ compilation backend \\ -fno-Clang Prevent using Clang as the C/C++ compilation backend \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error -- cgit v1.2.3 From 9dd7a9eb0265f8be0fbfa4ea65b2b52f8bf29b0f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 18 Jul 2023 03:08:15 -0400 Subject: llvm: fix various crashes --- lib/std/target.zig | 29 ++++- src/codegen/llvm.zig | 300 ++++++++++++++++++++++++++++++++----------- src/codegen/llvm/Builder.zig | 20 ++- 3 files changed, 262 insertions(+), 87 deletions(-) (limited to 'src') diff --git a/lib/std/target.zig b/lib/std/target.zig index d40ef11bb7..e00a1c15d7 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1910,12 +1910,37 @@ pub const Target = struct { pub fn stackAlignment(target: Target) u16 { return switch (target.cpu.arch) { + .amdgcn => 4, .x86 => switch (target.os.tag) { .windows => 4, else => 16, }, - .arm, .armeb, .mips, .mipsel => 8, - .aarch64, .aarch64_be, .powerpc64, .powerpc64le, .riscv64, .x86_64, .wasm32, .wasm64 => 16, + .arm, + .armeb, + .thumb, + .thumbeb, + .mips, + .mipsel, + .sparc, + .sparcel, + => 8, + .aarch64, + .aarch64_be, + .aarch64_32, + .bpfeb, + .bpfel, + .mips64, + .mips64el, + .powerpc64, + .powerpc64le, + .riscv32, + .riscv64, + .sparc64, + .x86_64, + .ve, + 
.wasm32, + .wasm64, + => 16, else => @divExact(target.ptrBitWidth(), 8), }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8c5660f8c5..d4bdb04507 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -341,34 +341,40 @@ const DataLayoutBuilder = struct { writer: anytype, ) @TypeOf(writer).Error!void { const is_aarch64_windows = self.target.cpu.arch == .aarch64 and self.target.os.tag == .windows; - try writer.print("{c}-m:{c}", .{ - @as(u8, switch (self.target.cpu.arch.endian()) { - .Little => 'e', - .Big => 'E', - }), - @as(u8, if (self.target.cpu.arch.isMIPS()) - 'm' // Mips mangling: Private symbols get a $ prefix. - else switch (self.target.ofmt) { - .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix. - //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix. - .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix. - // Other symbols get a `_` prefix. - .coff => switch (self.target.os.tag) { - .windows => switch (self.target.cpu.arch) { - .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual prefix. - // Regular C symbols get a `_` prefix. Functions with `__stdcall`, `__fastcall`, - // and `__vectorcall` have custom mangling that appends `@N` where N is the - // number of bytes used to pass parameters. C++ symbols starting with `?` are - // not mangled in any way. - else => 'w', // Windows COFF mangling: Similar to x, except that normal C - // symbols do not receive a `_` prefix. + try writer.writeByte(switch (self.target.cpu.arch.endian()) { + .Little => 'e', + .Big => 'E', + }); + switch (self.target.cpu.arch) { + .amdgcn, + .nvptx, + .nvptx64, + => {}, + .avr => try writer.writeAll("-P1"), + else => try writer.print("-m:{c}", .{@as(u8, switch (self.target.cpu.arch) { + .mips, .mipsel => 'm', // Mips mangling: Private symbols get a $ prefix. + else => switch (self.target.ofmt) { + .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix. + //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix. + .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix. + // Other symbols get a `_` prefix. + .coff => switch (self.target.os.tag) { + .windows => switch (self.target.cpu.arch) { + .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual + // prefix. Regular C symbols get a `_` prefix. Functions with `__stdcall`, + //`__fastcall`, and `__vectorcall` have custom mangling that appends `@N` + // where N is the number of bytes used to pass parameters. C++ symbols + // starting with `?` are not mangled in any way. + else => 'w', // Windows COFF mangling: Similar to x, except that normal C + // symbols do not receive a `_` prefix. + }, + else => 'e', }, + //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix. else => 'e', }, - //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix. 
- else => 'e', - }), - }); + })}), + } var any_non_integral = false; const ptr_bit_width = self.target.ptrBitWidth(); var default_info = struct { size: u16, abi: u16, pref: u16, idx: u16 }{ @@ -399,66 +405,134 @@ const DataLayoutBuilder = struct { .pref = pref, .idx = idx, }; + if (self.target.cpu.arch == .aarch64_32) continue; if (!info.force_in_data_layout and matches_default and - self.target.cpu.arch != .riscv64 and !is_aarch64_windows) continue; + self.target.cpu.arch != .riscv64 and !is_aarch64_windows and + self.target.cpu.arch != .bpfeb and self.target.cpu.arch != .bpfel) continue; try writer.writeAll("-p"); if (info.llvm != .default) try writer.print("{d}", .{@intFromEnum(info.llvm)}); try writer.print(":{d}:{d}", .{ size, abi }); - if (pref != abi or idx != size) { + if (pref != abi or idx != size or self.target.cpu.arch == .hexagon) { try writer.print(":{d}", .{pref}); if (idx != size) try writer.print(":{d}", .{idx}); } } if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) try writer.writeAll("-Fi8"); // for thumb interwork - try self.typeAlignment(.integer, 8, 8, 8, false, writer); - try self.typeAlignment(.integer, 16, 16, 16, false, writer); - try self.typeAlignment(.integer, 32, if (is_aarch64_windows) 0 else 32, 32, false, writer); - try self.typeAlignment(.integer, 64, 32, 64, false, writer); - try self.typeAlignment(.integer, 128, 32, 64, false, writer); - if (backendSupportsF16(self.target)) try self.typeAlignment(.float, 16, 16, 16, false, writer); - try self.typeAlignment(.float, 32, 32, 32, false, writer); - try self.typeAlignment(.float, 64, 64, 64, false, writer); - if (backendSupportsF80(self.target)) try self.typeAlignment(.float, 80, 0, 0, false, writer); - try self.typeAlignment(.float, 128, 128, 128, false, writer); - try self.typeAlignment(.vector, 64, 64, 64, false, writer); - try self.typeAlignment(.vector, 128, 128, 128, false, writer); - if (self.target.os.tag != .windows) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + if (self.target.cpu.arch != .hexagon) { + if (self.target.cpu.arch == .s390x) try self.typeAlignment(.integer, 1, 8, 8, false, writer); + try self.typeAlignment(.integer, 8, 8, 8, false, writer); + try self.typeAlignment(.integer, 16, 16, 16, false, writer); + try self.typeAlignment(.integer, 32, if (is_aarch64_windows) 0 else 32, 32, false, writer); + try self.typeAlignment(.integer, 64, 32, 64, false, writer); + try self.typeAlignment(.integer, 128, 32, 64, false, writer); + if (backendSupportsF16(self.target)) try self.typeAlignment(.float, 16, 16, 16, false, writer); + try self.typeAlignment(.float, 32, 32, 32, false, writer); + try self.typeAlignment(.float, 64, 64, 64, false, writer); + if (backendSupportsF80(self.target)) try self.typeAlignment(.float, 80, 0, 0, false, writer); + try self.typeAlignment(.float, 128, 128, 128, false, writer); + } + switch (self.target.cpu.arch) { + .amdgcn => { + try self.typeAlignment(.vector, 16, 16, 16, false, writer); + try self.typeAlignment(.vector, 24, 32, 32, false, writer); + try self.typeAlignment(.vector, 32, 32, 32, false, writer); + try self.typeAlignment(.vector, 48, 64, 64, false, writer); + try self.typeAlignment(.vector, 96, 128, 128, false, writer); + try self.typeAlignment(.vector, 192, 256, 256, false, writer); + try self.typeAlignment(.vector, 256, 256, 256, false, writer); + try self.typeAlignment(.vector, 512, 512, 512, false, writer); + try self.typeAlignment(.vector, 1024, 1024, 1024, false, writer); + try self.typeAlignment(.vector, 2048, 2048, 
2048, false, writer); + }, + .ve => {}, + else => { + try self.typeAlignment(.vector, 16, 32, 32, false, writer); + try self.typeAlignment(.vector, 32, 32, 32, false, writer); + try self.typeAlignment(.vector, 64, 64, 64, false, writer); + try self.typeAlignment(.vector, 128, 128, 128, true, writer); + }, + } + if (self.target.os.tag != .windows and self.target.cpu.arch != .avr) + try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); for (@as([]const u24, switch (self.target.cpu.arch) { - .aarch64_32, + .avr => &.{8}, + .msp430 => &.{ 8, 16 }, .arm, .armeb, .mips, .mipsel, .powerpc, .powerpcle, + .riscv32, + .sparc, + .sparcel, .thumb, .thumbeb, - .riscv32, => &.{32}, .aarch64, .aarch64_be, + .aarch64_32, + .amdgcn, + .bpfeb, + .bpfel, .mips64, .mips64el, .powerpc64, .powerpc64le, .riscv64, + .s390x, + .sparc64, + .ve, .wasm32, .wasm64, => &.{ 32, 64 }, + .hexagon => &.{ 16, 32 }, .x86 => &.{ 8, 16, 32 }, + .nvptx, + .nvptx64, + => &.{ 16, 32, 64 }, .x86_64 => &.{ 8, 16, 32, 64 }, else => &.{}, }), 0..) |natural, index| switch (index) { 0 => try writer.print("-n{d}", .{natural}), else => try writer.print(":{d}", .{natural}), }; - if (self.target.os.tag == .windows) try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + if (self.target.cpu.arch == .hexagon) { + try self.typeAlignment(.integer, 64, 64, 64, true, writer); + try self.typeAlignment(.integer, 32, 32, 32, true, writer); + try self.typeAlignment(.integer, 16, 16, 16, true, writer); + try self.typeAlignment(.integer, 1, 8, 8, true, writer); + try self.typeAlignment(.float, 32, 32, 32, true, writer); + try self.typeAlignment(.float, 64, 64, 64, true, writer); + } + if (self.target.os.tag == .windows or self.target.cpu.arch == .avr) + try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); const stack_abi = self.target.stackAlignment() * 8; - if (self.target.os.tag == .windows or stack_abi != ptr_bit_width) + if (self.target.os.tag == .windows or self.target.cpu.arch == .msp430 or + stack_abi != ptr_bit_width) try writer.print("-S{d}", .{stack_abi}); - try self.typeAlignment(.vector, 256, 128, 128, true, writer); - try self.typeAlignment(.vector, 512, 128, 128, true, writer); + switch (self.target.cpu.arch) { + .hexagon, .ve => { + try self.typeAlignment(.vector, 32, 128, 128, true, writer); + try self.typeAlignment(.vector, 64, 128, 128, true, writer); + try self.typeAlignment(.vector, 128, 128, 128, true, writer); + }, + else => {}, + } + if (self.target.cpu.arch != .amdgcn) { + try self.typeAlignment(.vector, 256, 128, 128, true, writer); + try self.typeAlignment(.vector, 512, 128, 128, true, writer); + try self.typeAlignment(.vector, 1024, 128, 128, true, writer); + try self.typeAlignment(.vector, 2048, 128, 128, true, writer); + try self.typeAlignment(.vector, 4096, 128, 128, true, writer); + try self.typeAlignment(.vector, 8192, 128, 128, true, writer); + try self.typeAlignment(.vector, 16384, 128, 128, true, writer); + } + const alloca_addr_space = llvmAllocaAddressSpace(self.target); + if (alloca_addr_space != .default) try writer.print("-A{d}", .{@intFromEnum(alloca_addr_space)}); + const global_addr_space = llvmDefaultGlobalAddressSpace(self.target); + if (global_addr_space != .default) try writer.print("-G{d}", .{@intFromEnum(global_addr_space)}); if (any_non_integral) { try writer.writeAll("-ni"); for (addr_space_info) |info| if (info.non_integral) @@ -472,11 +546,13 @@ const DataLayoutBuilder = struct { size: u24, default_abi: u24, default_pref: u24, - force_pref: bool, + default_force_pref: bool, 
writer: anytype, ) @TypeOf(writer).Error!void { var abi = default_abi; var pref = default_pref; + var force_abi = false; + var force_pref = default_force_pref; if (kind == .float and size == 80) { abi = 128; pref = 128; @@ -493,21 +569,45 @@ const DataLayoutBuilder = struct { } switch (kind) { .integer => { + if (self.target.ptrBitWidth() <= 16 and size >= 128) return; abi = @min(abi, self.target.maxIntAlignment() * 8); switch (self.target.os.tag) { .linux => switch (self.target.cpu.arch) { - .aarch64, .aarch64_be, .mips, .mipsel => pref = @max(pref, 32), + .aarch64, + .aarch64_be, + .aarch64_32, + .mips, + .mipsel, + => pref = @max(pref, 32), else => {}, }, else => {}, } switch (self.target.cpu.arch) { - .aarch64, .aarch64_be, .riscv64 => switch (size) { - 128 => { - abi = size; - pref = size; - }, - else => {}, + .aarch64, + .aarch64_be, + .aarch64_32, + .bpfeb, + .bpfel, + .nvptx, + .nvptx64, + .riscv64, + => if (size == 128) { + abi = size; + pref = size; + }, + .hexagon => force_abi = true, + .mips64, + .mips64el, + => if (size <= 32) { + pref = 32; + }, + .s390x => if (size <= 16) { + pref = 16; + }, + .ve => if (size == 64) { + abi = size; + pref = size; }, else => {}, } @@ -517,18 +617,66 @@ const DataLayoutBuilder = struct { 128 => abi = 64, else => {}, } - } else if (self.target.cpu.arch.isPPC64()) { + } else if ((self.target.cpu.arch.isPPC64() and (size == 256 or size == 512)) or + (self.target.cpu.arch.isNvptx() and (size == 16 or size == 32))) + { + force_abi = true; + abi = size; + pref = size; + } else if (self.target.cpu.arch == .amdgcn and size <= 2048) { + force_abi = true; + } else if (self.target.cpu.arch == .hexagon and + ((size >= 32 and size <= 64) or (size >= 512 and size <= 2048))) + { abi = size; pref = size; + force_pref = true; + } else if (self.target.cpu.arch == .s390x and size == 128) { + abi = 64; + pref = 64; + force_pref = false; + } else if (self.target.cpu.arch == .ve and (size >= 64 and size <= 16384)) { + abi = 64; + pref = 64; + force_abi = true; + force_pref = true; + }, + .float => switch (self.target.cpu.arch) { + .avr, .msp430, .sparc64 => if (size != 32 and size != 64) return, + .hexagon => if (size == 32 or size == 64) { + force_abi = true; + }, + .aarch64_32 => if (size == 128) { + abi = size; + pref = size; + }, + .ve => if (size == 64) { + abi = size; + pref = size; + }, + else => {}, }, - .float => {}, .aggregate => if (self.target.os.tag == .windows or self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) { pref = @min(pref, self.target.ptrBitWidth()); + } else if (self.target.cpu.arch == .hexagon) { + abi = 0; + pref = 0; + } else if (self.target.cpu.arch == .s390x) { + abi = 8; + pref = 16; + } else if (self.target.cpu.arch == .msp430) { + abi = 8; + pref = 8; }, } - if (abi == default_abi and pref == default_pref) return; + if (kind != .vector and self.target.cpu.arch == .avr) { + force_abi = true; + abi = 8; + pref = 8; + } + if (!force_abi and abi == default_abi and pref == default_pref) return; try writer.print("-{c}", .{@tagName(kind)[0]}); if (size != 0) try writer.print("{d}", .{size}); try writer.print(":{d}", .{abi}); @@ -5096,12 +5244,15 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. 
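As a consolidated sketch of the pattern this hunk arrives at (the names o, self, call, abi_ret_ty, llvm_ret_ty, return_type, and mod are assumed from the surrounding airCall code in this file; this is compiler-internal and not runnable on its own), the call site spills the ABI-typed result into a stack slot at its ABI alignment, then either hands back the pointer for by-ref results or reloads the canonical type:

    // Sketch only: mirrors the calls introduced in the hunk below.
    // Compute the ABI alignment of the ABI-level return type.
    const alignment = Builder.Alignment.fromByteUnits(
        o.target_data.abiAlignmentOfType(abi_ret_ty.toLlvm(&o.builder)),
    );
    // Spill the ABI-typed call result to an aligned stack slot.
    const rp = try self.buildAlloca(llvm_ret_ty, alignment);
    _ = try self.wip.store(.normal, call, rp, alignment);
    // By-ref results are returned as the pointer; otherwise reload the
    // canonical LLVM type from the slot.
    return if (isByRef(return_type, mod))
        rp
    else
        try self.wip.load(.normal, llvm_ret_ty, rp, alignment, "");
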
- const rp = try self.buildAlloca(llvm_ret_ty, .default); - _ = try self.wip.store(.normal, call, rp, .default); + const alignment = Builder.Alignment.fromByteUnits( + o.target_data.abiAlignmentOfType(abi_ret_ty.toLlvm(&o.builder)), + ); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); + _ = try self.wip.store(.normal, call, rp, alignment); return if (isByRef(return_type, mod)) rp else - try self.wip.load(.normal, llvm_ret_ty, rp, .default, ""); + try self.wip.load(.normal, llvm_ret_ty, rp, alignment, ""); } if (isByRef(return_type, mod)) { @@ -10923,23 +11074,26 @@ fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo { .{ .zig = .local, .llvm = Builder.AddrSpace.nvptx.local }, }, .amdgcn => &.{ - .{ .zig = .generic, .llvm = Builder.AddrSpace.amdgpu.flat }, - .{ .zig = .global, .llvm = Builder.AddrSpace.amdgpu.global }, - .{ .zig = .constant, .llvm = Builder.AddrSpace.amdgpu.constant }, - .{ .zig = .shared, .llvm = Builder.AddrSpace.amdgpu.local }, - .{ .zig = .local, .llvm = Builder.AddrSpace.amdgpu.private }, + .{ .zig = .generic, .llvm = Builder.AddrSpace.amdgpu.flat, .force_in_data_layout = true }, + .{ .zig = .global, .llvm = Builder.AddrSpace.amdgpu.global, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.region, .size = 32, .abi = 32 }, + .{ .zig = .shared, .llvm = Builder.AddrSpace.amdgpu.local, .size = 32, .abi = 32 }, + .{ .zig = .constant, .llvm = Builder.AddrSpace.amdgpu.constant, .force_in_data_layout = true }, + .{ .zig = .local, .llvm = Builder.AddrSpace.amdgpu.private, .size = 32, .abi = 32 }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.constant_32bit, .size = 32, .abi = 32 }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.buffer_fat_pointer, .non_integral = true }, }, .avr => &.{ - .{ .zig = .generic, .llvm = .default }, - .{ .zig = .flash, .llvm = Builder.AddrSpace.avr.flash }, - .{ .zig = .flash1, .llvm = Builder.AddrSpace.avr.flash1 }, - .{ .zig = .flash2, .llvm = Builder.AddrSpace.avr.flash2 }, - .{ .zig = .flash3, .llvm = Builder.AddrSpace.avr.flash3 }, - .{ .zig = .flash4, .llvm = Builder.AddrSpace.avr.flash4 }, - .{ .zig = .flash5, .llvm = Builder.AddrSpace.avr.flash5 }, + .{ .zig = .generic, .llvm = .default, .abi = 8 }, + .{ .zig = .flash, .llvm = Builder.AddrSpace.avr.flash, .abi = 8 }, + .{ .zig = .flash1, .llvm = Builder.AddrSpace.avr.flash1, .abi = 8 }, + .{ .zig = .flash2, .llvm = Builder.AddrSpace.avr.flash2, .abi = 8 }, + .{ .zig = .flash3, .llvm = Builder.AddrSpace.avr.flash3, .abi = 8 }, + .{ .zig = .flash4, .llvm = Builder.AddrSpace.avr.flash4, .abi = 8 }, + .{ .zig = .flash5, .llvm = Builder.AddrSpace.avr.flash5, .abi = 8 }, }, .wasm32, .wasm64 => &.{ - .{ .zig = .generic, .llvm = .default }, + .{ .zig = .generic, .llvm = .default, .force_in_data_layout = true }, .{ .zig = null, .llvm = Builder.AddrSpace.wasm.variable, .non_integral = true }, .{ .zig = null, .llvm = Builder.AddrSpace.wasm.externref, .non_integral = true, .size = 8, .abi = 8 }, .{ .zig = null, .llvm = Builder.AddrSpace.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 }, diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 1ff0ea6976..07c861aec7 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2789,10 +2789,6 @@ pub const WipFunction = struct { name: []const u8, ) Allocator.Error!Value { assert(ptr.typeOfWip(self).isPointer(self.builder)); - const final_scope = switch (ordering) { - .none => .system, - else => scope, - }; try self.ensureUnusedExtraCapacity(1, 
Instruction.Load, 0); const instruction = try self.addInst(name, .{ .tag = switch (ordering) { @@ -2808,7 +2804,10 @@ pub const WipFunction = struct { .data = self.addExtraAssumeCapacity(Instruction.Load{ .type = ty, .ptr = ptr, - .info = .{ .scope = final_scope, .ordering = ordering, .alignment = alignment }, + .info = .{ .scope = switch (ordering) { + .none => .system, + else => scope, + }, .ordering = ordering, .alignment = alignment }, }), }); if (self.builder.useLibLlvm()) { @@ -2817,7 +2816,6 @@ pub const WipFunction = struct { ptr.toLlvm(self), instruction.llvmName(self), ); - if (final_scope == .singlethread) llvm_instruction.setAtomicSingleThread(.True); if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); @@ -2845,10 +2843,6 @@ pub const WipFunction = struct { alignment: Alignment, ) Allocator.Error!Instruction.Index { assert(ptr.typeOfWip(self).isPointer(self.builder)); - const final_scope = switch (ordering) { - .none => .system, - else => scope, - }; try self.ensureUnusedExtraCapacity(1, Instruction.Store, 0); const instruction = try self.addInst(null, .{ .tag = switch (ordering) { @@ -2864,7 +2858,10 @@ pub const WipFunction = struct { .data = self.addExtraAssumeCapacity(Instruction.Store{ .val = val, .ptr = ptr, - .info = .{ .scope = final_scope, .ordering = ordering, .alignment = alignment }, + .info = .{ .scope = switch (ordering) { + .none => .system, + else => scope, + }, .ordering = ordering, .alignment = alignment }, }), }); if (self.builder.useLibLlvm()) { @@ -2873,7 +2870,6 @@ pub const WipFunction = struct { .normal => {}, .@"volatile" => llvm_instruction.setVolatile(.True), } - if (final_scope == .singlethread) llvm_instruction.setAtomicSingleThread(.True); if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); -- cgit v1.2.3 From ea72fea1a4e2bc8309c211308f49f7f2c38507be Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 13 Jul 2023 05:06:27 -0400 Subject: llvm: fix bootstrap --- src/codegen/llvm/Builder.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 07c861aec7..dd8bb6605b 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -6308,8 +6308,8 @@ fn vectorTypeAssumeCapacity( .data = self.addTypeExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(switch (kind) { - .normal => &llvm.Type.vectorType, - .scalable => &llvm.Type.scalableVectorType, + .normal => llvm.Type.vectorType, + .scalable => llvm.Type.scalableVectorType, }(child.toLlvm(self), @intCast(len))); } return @enumFromInt(gop.index); @@ -7443,8 +7443,8 @@ fn gepConstAssumeCapacity( for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self); self.llvm.constants.appendAssumeCapacity(switch (kind) { - .normal => &llvm.Type.constGEP, - .inbounds => &llvm.Type.constInBoundsGEP, + .normal => llvm.Type.constGEP, + .inbounds => llvm.Type.constInBoundsGEP, }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(llvm_indices.len))); } } -- cgit v1.2.3 From a1062c63cad1ad80ecd42f6c58403133a608cca2 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 20 Jul 
2023 01:48:27 -0400 Subject: llvm: add Builder trailing extra interface --- src/codegen/llvm/Builder.zig | 335 +++++++++++++++++++++++++------------------ 1 file changed, 194 insertions(+), 141 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index dd8bb6605b..15e7891d18 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -311,8 +311,8 @@ pub const Type = enum(u32) { .function, .vararg_function, => { - const extra = builder.typeExtraDataTrail(Type.Function, item.data); - return @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.params_len]); + var extra = builder.typeExtraDataTrail(Type.Function, item.data); + return extra.trail.next(extra.data.params_len, Type, builder); }, else => unreachable, } @@ -519,8 +519,8 @@ pub const Type = enum(u32) { .structure, .packed_structure, => { - const extra = builder.typeExtraDataTrail(Type.Structure, item.data); - return @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + var extra = builder.typeExtraDataTrail(Type.Structure, item.data); + return extra.trail.next(extra.data.fields_len, Type, builder); }, .named_structure => return builder.typeExtraData(Type.NamedStructure, item.data).body .structFields(builder), @@ -539,9 +539,8 @@ pub const Type = enum(u32) { .structure, .packed_structure, => { - const extra = builder.typeExtraDataTrail(Type.Structure, item.data); - const fields: []const Type = - @ptrCast(builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + var extra = builder.typeExtraDataTrail(Type.Structure, item.data); + const fields = extra.trail.next(extra.data.fields_len, Type, builder); return fields[indices[0]].childTypeAt(indices[1..], builder); }, .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body @@ -590,9 +589,8 @@ pub const Type = enum(u32) { .metadata => "Metadata", }), .function, .vararg_function => |kind| { - const extra = data.builder.typeExtraDataTrail(Type.Function, item.data); - const params: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]); + var extra = data.builder.typeExtraDataTrail(Type.Function, item.data); + const params = extra.trail.next(extra.data.params_len, Type, data.builder); try writer.print("f_{m}", .{extra.data.ret.fmt(data.builder)}); for (params) |param| try writer.print("{m}", .{param.fmt(data.builder)}); switch (kind) { @@ -605,11 +603,9 @@ pub const Type = enum(u32) { .integer => try writer.print("i{d}", .{item.data}), .pointer => try writer.print("p{d}", .{item.data}), .target => { - const extra = data.builder.typeExtraDataTrail(Type.Target, item.data); - const types: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.types_len]); - const ints: []const u32 = @ptrCast(data.builder.type_extra.items[extra.end + - extra.data.types_len ..][0..extra.data.ints_len]); + var extra = data.builder.typeExtraDataTrail(Type.Target, item.data); + const types = extra.trail.next(extra.data.types_len, Type, data.builder); + const ints = extra.trail.next(extra.data.ints_len, u32, data.builder); try writer.print("t{s}", .{extra.data.name.toSlice(data.builder).?}); for (types) |ty| try writer.print("_{m}", .{ty.fmt(data.builder)}); for (ints) |int| try writer.print("_{d}", .{int}); @@ -636,9 +632,8 @@ pub const Type = enum(u32) { try writer.print("a{d}{m}", .{ extra.length(), extra.child.fmt(data.builder) }); }, .structure, .packed_structure => { - const extra = 
data.builder.typeExtraDataTrail(Type.Structure, item.data); - const fields: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + var extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); + const fields = extra.trail.next(extra.data.fields_len, Type, data.builder); try writer.writeAll("sl_"); for (fields) |field| try writer.print("{m}", .{field.fmt(data.builder)}); try writer.writeByte('s'); @@ -656,9 +651,8 @@ pub const Type = enum(u32) { switch (item.tag) { .simple => unreachable, .function, .vararg_function => |kind| { - const extra = data.builder.typeExtraDataTrail(Type.Function, item.data); - const params: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.params_len]); + var extra = data.builder.typeExtraDataTrail(Type.Function, item.data); + const params = extra.trail.next(extra.data.params_len, Type, data.builder); if (!comptime std.mem.eql(u8, fmt_str, ">")) try writer.print("{%} ", .{extra.data.ret.fmt(data.builder)}); if (!comptime std.mem.eql(u8, fmt_str, "<")) { @@ -681,11 +675,9 @@ pub const Type = enum(u32) { .integer => try writer.print("i{d}", .{item.data}), .pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(item.data))}), .target => { - const extra = data.builder.typeExtraDataTrail(Type.Target, item.data); - const types: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.types_len]); - const ints: []const u32 = @ptrCast(data.builder.type_extra.items[extra.end + - extra.data.types_len ..][0..extra.data.ints_len]); + var extra = data.builder.typeExtraDataTrail(Type.Target, item.data); + const types = extra.trail.next(extra.data.types_len, Type, data.builder); + const ints = extra.trail.next(extra.data.ints_len, u32, data.builder); try writer.print( \\target({"} , .{extra.data.name.fmt(data.builder)}); @@ -714,9 +706,8 @@ pub const Type = enum(u32) { try writer.print("[{d} x {%}]", .{ extra.length(), extra.child.fmt(data.builder) }); }, .structure, .packed_structure => |kind| { - const extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); - const fields: []const Type = - @ptrCast(data.builder.type_extra.items[extra.end..][0..extra.data.fields_len]); + var extra = data.builder.typeExtraDataTrail(Type.Structure, item.data); + const fields = extra.trail.next(extra.data.fields_len, Type, data.builder); switch (kind) { .structure => {}, .packed_structure => try writer.writeByte('<'), @@ -812,10 +803,8 @@ pub const Type = enum(u32) { => { if (try visited.fetchPut(builder.gpa, self, {})) |_| return false; - const extra = builder.typeExtraDataTrail(Type.Structure, item.data); - const fields: []const Type = @ptrCast( - builder.type_extra.items[extra.end..][0..extra.data.fields_len], - ); + var extra = builder.typeExtraDataTrail(Type.Structure, item.data); + const fields = extra.trail.next(extra.data.fields_len, Type, builder); for (fields) |field| { if (field.isVector(builder) and field.vectorKind(builder) == .scalable) return false; @@ -1639,9 +1628,8 @@ pub const Function = struct { .extractelement => wip.extraData(ExtractElement, instruction.data) .val.typeOfWip(wip).childType(wip.builder), .extractvalue => { - const extra = wip.extraDataTrail(ExtractValue, instruction.data); - const indices: []const u32 = - wip.extra.items[extra.end..][0..extra.data.indices_len]; + var extra = wip.extraDataTrail(ExtractValue, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, u32, wip); return 
extra.data.val.typeOfWip(wip).childTypeAt(indices, wip.builder); }, .@"fcmp false", @@ -1694,9 +1682,8 @@ pub const Function = struct { .getelementptr, .@"getelementptr inbounds", => { - const extra = wip.extraDataTrail(GetElementPtr, instruction.data); - const indices: []const Value = - @ptrCast(wip.extra.items[extra.end..][0..extra.data.indices_len]); + var extra = wip.extraDataTrail(GetElementPtr, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, Value, wip); const base_ty = extra.data.base.typeOfWip(wip); if (!base_ty.isVector(wip.builder)) for (indices) |index| { const index_ty = index.typeOfWip(wip); @@ -1829,9 +1816,8 @@ pub const Function = struct { .extractelement => function.extraData(ExtractElement, instruction.data) .val.typeOf(function_index, builder).childType(builder), .extractvalue => { - const extra = function.extraDataTrail(ExtractValue, instruction.data); - const indices: []const u32 = - function.extra[extra.end..][0..extra.data.indices_len]; + var extra = function.extraDataTrail(ExtractValue, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, u32, function); return extra.data.val.typeOf(function_index, builder) .childTypeAt(indices, builder); }, @@ -1885,9 +1871,8 @@ pub const Function = struct { .getelementptr, .@"getelementptr inbounds", => { - const extra = function.extraDataTrail(GetElementPtr, instruction.data); - const indices: []const Value = - @ptrCast(function.extra[extra.end..][0..extra.data.indices_len]); + var extra = function.extraDataTrail(GetElementPtr, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, Value, function); const base_ty = extra.data.base.typeOf(function_index, builder); if (!base_ty.isVector(builder)) for (indices) |index| { const index_ty = index.typeOf(function_index, builder); @@ -1908,10 +1893,9 @@ pub const Function = struct { .phi, .@"phi fast", => { - const extra = function.extraDataTrail(Phi, instruction.data); - const incoming_vals: []const Value = - @ptrCast(function.extra[extra.end..][0..extra.data.incoming_len]); - return incoming_vals[0].typeOf(function_index, builder); + var extra = function.extraDataTrail(Phi, instruction.data); + const vals = extra.trail.next(extra.data.incoming_len, Value, function); + return vals[0].typeOf(function_index, builder); }, .select, .@"select fast", @@ -2112,11 +2096,32 @@ pub const Function = struct { return argument_index.toValue(); } + const ExtraDataTrail = struct { + index: Instruction.ExtraIndex, + + fn nextMut(self: *ExtraDataTrail, len: u32, comptime Item: type, function: *Function) []Item { + const items: []Item = @ptrCast(function.extra[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + + fn next( + self: *ExtraDataTrail, + len: u32, + comptime Item: type, + function: *const Function, + ) []const Item { + const items: []const Item = @ptrCast(function.extra[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + }; + fn extraDataTrail( self: *const Function, comptime T: type, index: Instruction.ExtraIndex, - ) struct { data: T, end: Instruction.ExtraIndex } { + ) struct { data: T, trail: ExtraDataTrail } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, self.extra[index..][0..fields.len]) |field, value| @@ -2126,7 +2131,10 @@ pub const Function = struct { MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; - return .{ .data = result, .end 
= index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; + return .{ + .data = result, + .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }, + }; } fn extraData(self: *const Function, comptime T: type, index: Instruction.ExtraIndex) T { @@ -2315,14 +2323,10 @@ pub const WipFunction = struct { wip: *WipFunction, ) Allocator.Error!void { const instruction = wip.instructions.get(@intFromEnum(self.instruction)); - const extra = wip.extraDataTrail(Instruction.Switch, instruction.data); - const case_vals: []Constant = - @ptrCast(wip.extra.items[extra.end..][0..extra.data.cases_len]); - const case_dests: []Block.Index = - @ptrCast(wip.extra.items[extra.end + extra.data.cases_len ..][0..extra.data.cases_len]); + var extra = wip.extraDataTrail(Instruction.Switch, instruction.data); assert(val.typeOf(wip.builder) == extra.data.val.typeOfWip(wip)); - case_vals[self.index] = val; - case_dests[self.index] = dest; + extra.trail.nextMut(extra.data.cases_len, Constant, wip)[self.index] = val; + extra.trail.nextMut(extra.data.cases_len, Block.Index, wip)[self.index] = dest; self.index += 1; dest.ptr(wip).branches += 1; if (wip.builder.useLibLlvm()) @@ -3113,13 +3117,10 @@ pub const WipFunction = struct { const incoming_len = self.block.ptrConst(wip).incoming; assert(vals.len == incoming_len and blocks.len == incoming_len); const instruction = wip.instructions.get(@intFromEnum(self.instruction)); - const extra = wip.extraDataTrail(Instruction.WipPhi, instruction.data); + var extra = wip.extraDataTrail(Instruction.WipPhi, instruction.data); for (vals) |val| assert(val.typeOfWip(wip) == extra.data.type); - const incoming_vals: []Value = @ptrCast(wip.extra.items[extra.end..][0..incoming_len]); - const incoming_blocks: []Block.Index = - @ptrCast(wip.extra.items[extra.end + incoming_len ..][0..incoming_len]); - @memcpy(incoming_vals, vals); - @memcpy(incoming_blocks, blocks); + @memcpy(extra.trail.nextMut(incoming_len, Value, wip), vals); + @memcpy(extra.trail.nextMut(incoming_len, Block.Index, wip), blocks); if (wip.builder.useLibLlvm()) { const ExpectedContents = extern struct { [expected_incoming_len]*llvm.Value, @@ -3504,9 +3505,8 @@ pub const WipFunction = struct { }); }, .extractvalue => { - const extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data); - const indices: []const u32 = - self.extra.items[extra.end..][0..extra.data.indices_len]; + var extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, u32, self); instruction.data = wip_extra.addExtra(Instruction.ExtractValue{ .val = instructions.map(extra.data.val), .indices_len = extra.data.indices_len, @@ -3520,9 +3520,8 @@ pub const WipFunction = struct { .getelementptr, .@"getelementptr inbounds", => { - const extra = self.extraDataTrail(Instruction.GetElementPtr, instruction.data); - const indices: []const Value = - @ptrCast(self.extra.items[extra.end..][0..extra.data.indices_len]); + var extra = self.extraDataTrail(Instruction.GetElementPtr, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, Value, self); instruction.data = wip_extra.addExtra(Instruction.GetElementPtr{ .type = extra.data.type, .base = instructions.map(extra.data.base), @@ -3539,9 +3538,8 @@ pub const WipFunction = struct { }); }, .insertvalue => { - const extra = self.extraDataTrail(Instruction.InsertValue, instruction.data); - const indices: []const u32 = - self.extra.items[extra.end..][0..extra.data.indices_len]; + var extra = 
self.extraDataTrail(Instruction.InsertValue, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, u32, self); instruction.data = wip_extra.addExtra(Instruction.InsertValue{ .val = instructions.map(extra.data.val), .elem = instructions.map(extra.data.elem), @@ -3564,12 +3562,10 @@ pub const WipFunction = struct { .phi, .@"phi fast", => { - const extra = self.extraDataTrail(Instruction.WipPhi, instruction.data); const incoming_len = current_block.incoming; - const incoming_vals: []const Value = - @ptrCast(self.extra.items[extra.end..][0..incoming_len]); - const incoming_blocks: []const Block.Index = - @ptrCast(self.extra.items[extra.end + incoming_len ..][0..incoming_len]); + var extra = self.extraDataTrail(Instruction.WipPhi, instruction.data); + const incoming_vals = extra.trail.next(incoming_len, Value, self); + const incoming_blocks = extra.trail.next(incoming_len, Block.Index, self); instruction.data = wip_extra.addExtra(Instruction.Phi{ .incoming_len = incoming_len, }); @@ -3607,11 +3603,9 @@ pub const WipFunction = struct { }); }, .@"switch" => { - const extra = self.extraDataTrail(Instruction.Switch, instruction.data); - const case_vals: []const Constant = - @ptrCast(self.extra.items[extra.end..][0..extra.data.cases_len]); - const case_blocks: []const Block.Index = @ptrCast(self.extra - .items[extra.end + extra.data.cases_len ..][0..extra.data.cases_len]); + var extra = self.extraDataTrail(Instruction.Switch, instruction.data); + const case_vals = extra.trail.next(extra.data.cases_len, Constant, self); + const case_blocks = extra.trail.next(extra.data.cases_len, Block.Index, self); instruction.data = wip_extra.addExtra(Instruction.Switch{ .val = instructions.map(extra.data.val), .default = extra.data.default, @@ -3956,11 +3950,32 @@ pub const WipFunction = struct { return result; } + const ExtraDataTrail = struct { + index: Instruction.ExtraIndex, + + fn nextMut(self: *ExtraDataTrail, len: u32, comptime Item: type, wip: *WipFunction) []Item { + const items: []Item = @ptrCast(wip.extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + + fn next( + self: *ExtraDataTrail, + len: u32, + comptime Item: type, + wip: *const WipFunction, + ) []const Item { + const items: []const Item = @ptrCast(wip.extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + }; + fn extraDataTrail( self: *const WipFunction, comptime T: type, index: Instruction.ExtraIndex, - ) struct { data: T, end: Instruction.ExtraIndex } { + ) struct { data: T, trail: ExtraDataTrail } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, self.extra.items[index..][0..fields.len]) |field, value| @@ -3970,7 +3985,10 @@ pub const WipFunction = struct { MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; - return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; + return .{ + .data = result, + .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }, + }; } fn extraData(self: *const WipFunction, comptime T: type, index: Instruction.ExtraIndex) T { @@ -4315,9 +4333,9 @@ pub const Constant = enum(u32) { .getelementptr, .@"getelementptr inbounds", => { - const extra = builder.constantExtraDataTrail(GetElementPtr, item.data); - const indices: []const Constant = @ptrCast(builder.constant_extra - .items[extra.end..][0..extra.data.info.indices_len]); + var extra = 
builder.constantExtraDataTrail(GetElementPtr, item.data); + const indices = + extra.trail.next(extra.data.info.indices_len, Constant, builder); const base_ty = extra.data.base.typeOf(builder); if (!base_ty.isVector(builder)) for (indices) |index| { const index_ty = index.typeOf(builder); @@ -4392,10 +4410,9 @@ pub const Constant = enum(u32) { return extra.lo_lo == 0 and extra.lo_hi == 0 and extra.hi == 0; }, .vector => { - const extra = builder.constantExtraDataTrail(Aggregate, item.data); - const len = extra.data.type.aggregateLen(builder); - const vals: []const Constant = - @ptrCast(builder.constant_extra.items[extra.end..][0..len]); + var extra = builder.constantExtraDataTrail(Aggregate, item.data); + const len: u32 = @intCast(extra.data.type.aggregateLen(builder)); + const vals = extra.trail.next(len, Constant, builder); for (vals) |val| if (!val.isZeroInit(builder)) return false; return true; }, @@ -4549,10 +4566,9 @@ pub const Constant = enum(u32) { .array, .vector, => |tag| { - const extra = data.builder.constantExtraDataTrail(Aggregate, item.data); - const len = extra.data.type.aggregateLen(data.builder); - const vals: []const Constant = - @ptrCast(data.builder.constant_extra.items[extra.end..][0..len]); + var extra = data.builder.constantExtraDataTrail(Aggregate, item.data); + const len: u32 = @intCast(extra.data.type.aggregateLen(data.builder)); + const vals = extra.trail.next(len, Constant, data.builder); try writer.writeAll(switch (tag) { .structure => "{ ", .packed_structure => "<{ ", @@ -4631,9 +4647,9 @@ pub const Constant = enum(u32) { .getelementptr, .@"getelementptr inbounds", => |tag| { - const extra = data.builder.constantExtraDataTrail(GetElementPtr, item.data); - const indices: []const Constant = @ptrCast(data.builder.constant_extra - .items[extra.end..][0..extra.data.info.indices_len]); + var extra = data.builder.constantExtraDataTrail(GetElementPtr, item.data); + const indices = + extra.trail.next(extra.data.info.indices_len, Constant, data.builder); try writer.print("{s} ({%}, {%}", .{ @tagName(tag), extra.data.type.fmt(data.builder), @@ -5243,9 +5259,8 @@ pub fn namedTypeSetBody( @intFromEnum(body_type); if (self.useLibLlvm()) { const body_item = self.type_items.items[@intFromEnum(body_type)]; - const body_extra = self.typeExtraDataTrail(Type.Structure, body_item.data); - const body_fields: []const Type = - @ptrCast(self.type_extra.items[body_extra.end..][0..body_extra.data.fields_len]); + var body_extra = self.typeExtraDataTrail(Type.Structure, body_item.data); + const body_fields = body_extra.trail.next(body_extra.data.fields_len, Type, self); const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len); defer self.gpa.free(llvm_fields); for (llvm_fields, body_fields) |*llvm_field, body_field| llvm_field.* = body_field.toLlvm(self); @@ -5947,10 +5962,9 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator }); }, .extractvalue => |tag| { - const extra = + var extra = function.extraDataTrail(Function.Instruction.ExtractValue, instruction.data); - const indices: []const u32 = - function.extra[extra.end..][0..extra.data.indices_len]; + const indices = extra.trail.next(extra.data.indices_len, u32, &function); try writer.print(" %{} = {s} {%}", .{ instruction_index.name(&function).fmt(self), @tagName(tag), @@ -5976,12 +5990,11 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator .getelementptr, .@"getelementptr inbounds", => |tag| { - const extra = function.extraDataTrail( + var extra = 
function.extraDataTrail( Function.Instruction.GetElementPtr, instruction.data, ); - const indices: []const Value = - @ptrCast(function.extra[extra.end..][0..extra.data.indices_len]); + const indices = extra.trail.next(extra.data.indices_len, Value, &function); try writer.print(" %{} = {s} {%}, {%}", .{ instruction_index.name(&function).fmt(self), @tagName(tag), @@ -6005,10 +6018,9 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator }); }, .insertvalue => |tag| { - const extra = + var extra = function.extraDataTrail(Function.Instruction.InsertValue, instruction.data); - const indices: []const u32 = - function.extra[extra.end..][0..extra.data.indices_len]; + const indices = extra.trail.next(extra.data.indices_len, u32, &function); try writer.print(" %{} = {s} {%}, {%}", .{ instruction_index.name(&function).fmt(self), @tagName(tag), @@ -6063,12 +6075,10 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator .phi, .@"phi fast", => |tag| { - const extra = - function.extraDataTrail(Function.Instruction.Phi, instruction.data); - const vals: []const Value = - @ptrCast(function.extra[extra.end..][0..extra.data.incoming_len]); - const blocks: []const Function.Block.Index = @ptrCast(function.extra[extra.end + - extra.data.incoming_len ..][0..extra.data.incoming_len]); + var extra = function.extraDataTrail(Function.Instruction.Phi, instruction.data); + const vals = extra.trail.next(extra.data.incoming_len, Value, &function); + const blocks = + extra.trail.next(extra.data.incoming_len, Function.Block.Index, &function); try writer.print(" %{} = {s} {%} ", .{ instruction_index.name(&function).fmt(self), @tagName(tag), @@ -6125,12 +6135,11 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator }); }, .@"switch" => |tag| { - const extra = + var extra = function.extraDataTrail(Function.Instruction.Switch, instruction.data); - const vals: []const Constant = - @ptrCast(function.extra[extra.end..][0..extra.data.cases_len]); - const blocks: []const Function.Block.Index = @ptrCast(function.extra[extra.end + - extra.data.cases_len ..][0..extra.data.cases_len]); + const vals = extra.trail.next(extra.data.cases_len, Constant, &function); + const blocks = + extra.trail.next(extra.data.cases_len, Function.Block.Index, &function); try writer.print(" {s} {%}, {%} [", .{ @tagName(tag), extra.data.val.fmt(function_index, self), @@ -6216,9 +6225,8 @@ fn fnTypeAssumeCapacity( } pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; - const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Function, rhs_data.data); - const rhs_params: []const Type = - @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.params_len]); + var rhs_extra = ctx.builder.typeExtraDataTrail(Type.Function, rhs_data.data); + const rhs_params = rhs_extra.trail.next(rhs_extra.data.params_len, Type, ctx.builder); return rhs_data.tag == tag and lhs_key.ret == rhs_extra.data.ret and std.mem.eql(Type, lhs_key.params, rhs_params); } @@ -6400,9 +6408,8 @@ fn structTypeAssumeCapacity( } pub fn eql(ctx: @This(), lhs_key: []const Type, _: void, rhs_index: usize) bool { const rhs_data = ctx.builder.type_items.items[rhs_index]; - const rhs_extra = ctx.builder.typeExtraDataTrail(Type.Structure, rhs_data.data); - const rhs_fields: []const Type = - @ptrCast(ctx.builder.type_extra.items[rhs_extra.end..][0..rhs_extra.data.fields_len]); + var rhs_extra = 
ctx.builder.typeExtraDataTrail(Type.Structure, rhs_data.data); + const rhs_fields = rhs_extra.trail.next(rhs_extra.data.fields_len, Type, ctx.builder); return rhs_data.tag == tag and std.mem.eql(Type, lhs_key, rhs_fields); } }; @@ -6542,11 +6549,32 @@ fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraInd return result; } +const TypeExtraDataTrail = struct { + index: Type.Item.ExtraIndex, + + fn nextMut(self: *TypeExtraDataTrail, len: u32, comptime Item: type, builder: *Builder) []Item { + const items: []Item = @ptrCast(builder.type_extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + + fn next( + self: *TypeExtraDataTrail, + len: u32, + comptime Item: type, + builder: *const Builder, + ) []const Item { + const items: []const Item = @ptrCast(builder.type_extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } +}; + fn typeExtraDataTrail( self: *const Builder, comptime T: type, index: Type.Item.ExtraIndex, -) struct { data: T, end: Type.Item.ExtraIndex } { +) struct { data: T, trail: TypeExtraDataTrail } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, value| @@ -6555,7 +6583,10 @@ fn typeExtraDataTrail( String, Type => @enumFromInt(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; - return .{ .data = result, .end = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }; + return .{ + .data = result, + .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) }, + }; } fn typeExtraData(self: *const Builder, comptime T: type, index: Type.Item.ExtraIndex) T { @@ -6914,7 +6945,7 @@ fn structConstAssumeCapacity( vals: []const Constant, ) if (build_options.have_llvm) Allocator.Error!Constant else Constant { const type_item = self.type_items.items[@intFromEnum(ty)]; - const extra = self.typeExtraDataTrail(Type.Structure, switch (type_item.tag) { + var extra = self.typeExtraDataTrail(Type.Structure, switch (type_item.tag) { .structure, .packed_structure => type_item.data, .named_structure => data: { const body_ty = self.typeExtraData(Type.NamedStructure, type_item.data).body; @@ -6926,8 +6957,7 @@ fn structConstAssumeCapacity( }, else => unreachable, }); - const fields: []const Type = - @ptrCast(self.type_extra.items[extra.end..][0..extra.data.fields_len]); + const fields = extra.trail.next(extra.data.fields_len, Type, self); for (fields, vals) |field, val| assert(field == val.typeOf(self)); for (vals) |val| { @@ -7405,9 +7435,9 @@ fn gepConstAssumeCapacity( pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { if (ctx.builder.constant_items.items(.tag)[rhs_index] != tag) return false; const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; - const rhs_extra = ctx.builder.constantExtraDataTrail(Constant.GetElementPtr, rhs_data); - const rhs_indices: []const Constant = @ptrCast(ctx.builder.constant_extra - .items[rhs_extra.end..][0..rhs_extra.data.info.indices_len]); + var rhs_extra = ctx.builder.constantExtraDataTrail(Constant.GetElementPtr, rhs_data); + const rhs_indices = + rhs_extra.trail.next(rhs_extra.data.info.indices_len, Constant, ctx.builder); return lhs_key.type == rhs_extra.data.type and lhs_key.base == rhs_extra.data.base and lhs_key.inrange == rhs_extra.data.info.inrange and std.mem.eql(Constant, lhs_key.indices, rhs_indices); @@ -7767,10 +7797,9 @@ fn getOrPutConstantAggregateAssumeCapacity( pub fn eql(ctx: 
@This(), lhs_key: Key, _: void, rhs_index: usize) bool { if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; - const rhs_extra = ctx.builder.constantExtraDataTrail(Constant.Aggregate, rhs_data); + var rhs_extra = ctx.builder.constantExtraDataTrail(Constant.Aggregate, rhs_data); if (lhs_key.type != rhs_extra.data.type) return false; - const rhs_vals: []const Constant = - @ptrCast(ctx.builder.constant_extra.items[rhs_extra.end..][0..lhs_key.vals.len]); + const rhs_vals = rhs_extra.trail.next(@intCast(lhs_key.vals.len), Constant, ctx.builder); return std.mem.eql(Constant, lhs_key.vals, rhs_vals); } }; @@ -7804,11 +7833,32 @@ fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item. return result; } +const ConstantExtraDataTrail = struct { + index: Constant.Item.ExtraIndex, + + fn nextMut(self: *ConstantExtraDataTrail, len: u32, comptime Item: type, builder: *Builder) []Item { + const items: []Item = @ptrCast(builder.constant_extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } + + fn next( + self: *ConstantExtraDataTrail, + len: u32, + comptime Item: type, + builder: *const Builder, + ) []const Item { + const items: []const Item = @ptrCast(builder.constant_extra.items[self.index..][0..len]); + self.index += @intCast(len); + return items; + } +}; + fn constantExtraDataTrail( self: *const Builder, comptime T: type, index: Constant.Item.ExtraIndex, -) struct { data: T, end: Constant.Item.ExtraIndex } { +) struct { data: T, trail: ConstantExtraDataTrail } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, value| @@ -7818,7 +7868,10 @@ fn constantExtraDataTrail( Constant.GetElementPtr.Info => @bitCast(value), else => @compileError("bad field type: " ++ @typeName(field.type)), }; - return .{ .data = result, .end = index + @as(Constant.Item.ExtraIndex, @intCast(fields.len)) }; + return .{ + .data = result, + .trail = .{ .index = index + @as(Constant.Item.ExtraIndex, @intCast(fields.len)) }, + }; } fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Item.ExtraIndex) T { -- cgit v1.2.3 From 4d31d4d875f32ed49c56151ca053a614b3ae343c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 20 Jul 2023 02:44:29 -0400 Subject: llvm: cleanup LLVM IR dumping --- src/codegen/llvm.zig | 19 ++-------- src/codegen/llvm/Builder.zig | 83 ++++++++++++++++++++++++++++++++------------ 2 files changed, 63 insertions(+), 39 deletions(-) (limited to 'src') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d4bdb04507..57842ef1e0 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1104,24 +1104,9 @@ pub const Object = struct { if (comp.verbose_llvm_ir) |path| { if (std.mem.eql(u8, path, "-")) { - self.llvm_module.dump(); - - const writer = std.io.getStdErr().writer(); - try writer.writeAll("\n" ++ "-" ** 200 ++ "\n\n"); - try self.builder.dump(writer); + self.builder.dump(); } else { - const path_z = try comp.gpa.dupeZ(u8, path); - defer comp.gpa.free(path_z); - - var error_message: [*:0]const u8 = undefined; - - if (self.llvm_module.printModuleToFile(path_z, &error_message).toBool()) { - defer llvm.disposeMessage(error_message); - - log.err("dump LLVM module failed ir={s}: {s}", .{ - path, error_message, - }); - } + _ = try self.builder.printToFile(path); } } diff --git a/src/codegen/llvm/Builder.zig 
b/src/codegen/llvm/Builder.zig index 15e7891d18..1df64ea8f6 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1701,7 +1701,7 @@ pub const Function = struct { => wip.extraData(Load, instruction.data).type, .phi, .@"phi fast", - => wip.extraData(WipPhi, instruction.data).type, + => wip.extraData(Phi, instruction.data).type, .select, .@"select fast", => wip.extraData(Select, instruction.data).lhs.typeOfWip(wip), @@ -1892,11 +1892,7 @@ pub const Function = struct { => function.extraData(Load, instruction.data).type, .phi, .@"phi fast", - => { - var extra = function.extraDataTrail(Phi, instruction.data); - const vals = extra.trail.next(extra.data.incoming_len, Value, function); - return vals[0].typeOf(function_index, builder); - }, + => function.extraData(Phi, instruction.data).type, .select, .@"select fast", => function.extraData(Select, instruction.data).lhs.typeOf(function_index, builder), @@ -2055,18 +2051,12 @@ pub const Function = struct { pub const Signedness = Constant.Cast.Signedness; }; - pub const WipPhi = struct { + pub const Phi = struct { type: Type, //incoming_vals: [block.incoming]Value, //incoming_blocks: [block.incoming]Block.Index, }; - pub const Phi = struct { - incoming_len: u32, - //incoming_vals: [incoming_len]Value, - //incoming_blocks: [incoming_len]Block.Index, - }; - pub const Select = struct { cond: Value, lhs: Value, @@ -3117,7 +3107,7 @@ pub const WipFunction = struct { const incoming_len = self.block.ptrConst(wip).incoming; assert(vals.len == incoming_len and blocks.len == incoming_len); const instruction = wip.instructions.get(@intFromEnum(self.instruction)); - var extra = wip.extraDataTrail(Instruction.WipPhi, instruction.data); + var extra = wip.extraDataTrail(Instruction.Phi, instruction.data); for (vals) |val| assert(val.typeOfWip(wip) == extra.data.type); @memcpy(extra.trail.nextMut(incoming_len, Value, wip), vals); @memcpy(extra.trail.nextMut(incoming_len, Block.Index, wip), blocks); @@ -3563,11 +3553,11 @@ pub const WipFunction = struct { .@"phi fast", => { const incoming_len = current_block.incoming; - var extra = self.extraDataTrail(Instruction.WipPhi, instruction.data); + var extra = self.extraDataTrail(Instruction.Phi, instruction.data); const incoming_vals = extra.trail.next(incoming_len, Value, self); const incoming_blocks = extra.trail.next(incoming_len, Block.Index, self); instruction.data = wip_extra.addExtra(Instruction.Phi{ - .incoming_len = incoming_len, + .type = extra.data.type, }); wip_extra.appendValues(incoming_vals, instructions); wip_extra.appendSlice(incoming_blocks); @@ -3831,10 +3821,10 @@ pub const WipFunction = struct { } const incoming = self.cursor.block.ptrConst(self).incoming; assert(incoming > 0); - try self.ensureUnusedExtraCapacity(1, Instruction.WipPhi, incoming * 2); + try self.ensureUnusedExtraCapacity(1, Instruction.Phi, incoming * 2); const instruction = try self.addInst(name, .{ .tag = tag, - .data = self.addExtraAssumeCapacity(Instruction.WipPhi{ .type = ty }), + .data = self.addExtraAssumeCapacity(Instruction.Phi{ .type = ty }), }); _ = self.extra.addManyAsSliceAssumeCapacity(incoming * 2); if (self.builder.useLibLlvm()) { @@ -5729,7 +5719,51 @@ pub fn binValue(self: *Builder, tag: Constant.Tag, lhs: Constant, rhs: Constant) return (try self.binConst(tag, lhs, rhs)).toValue(); } -pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void { +pub fn dump(self: *Builder) void { + if (self.useLibLlvm()) + self.llvm.module.?.dump() + else + 
self.print(std.io.getStdErr().writer()) catch {}; +} + +pub fn printToFile(self: *Builder, path: []const u8) Allocator.Error!bool { + const path_z = try self.gpa.dupeZ(u8, path); + defer self.gpa.free(path_z); + return self.printToFileZ(path_z); +} + +pub fn printToFileZ(self: *Builder, path: [*:0]const u8) bool { + if (self.useLibLlvm()) { + var error_message: [*:0]const u8 = undefined; + if (self.llvm.module.?.printModuleToFile(path, &error_message).toBool()) { + defer llvm.disposeMessage(error_message); + log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, error_message }); + return false; + } + } else { + var file = std.fs.cwd().createFileZ(path, .{}) catch |err| { + log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) }); + return false; + }; + defer file.close(); + self.print(file.writer()) catch |err| { + log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) }); + return false; + }; + } + return true; +} + +pub fn print(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void { + var bw = std.io.bufferedWriter(writer); + try self.printUnbuffered(bw.writer()); + try bw.flush(); +} + +pub fn printUnbuffered( + self: *Builder, + writer: anytype, +) (@TypeOf(writer).Error || Allocator.Error)!void { if (self.source_filename != .none) try writer.print( \\; ModuleID = '{s}' \\source_filename = {"} @@ -5790,7 +5824,10 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator }); for (0..params_len) |arg| { if (arg > 0) try writer.writeAll(", "); - try writer.print("{%}", .{function.arg(@intCast(arg)).fmt(function_index, self)}); + if (function.instructions.len > 0) + try writer.print("{%}", .{function.arg(@intCast(arg)).fmt(function_index, self)}) + else + try writer.print("{%}", .{global.type.functionParameters(self)[arg].fmt(self)}); } switch (global.type.functionKind(self)) { .normal => {}, @@ -5801,6 +5838,7 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator } try writer.print("){}{}", .{ global.unnamed_addr, function.alignment }); if (function.instructions.len > 0) { + var block_incoming_len: u32 = undefined; try writer.writeAll(" {\n"); for (params_len..function.instructions.len) |instruction_i| { const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i); @@ -5933,6 +5971,7 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator }, .arg => unreachable, .block => { + block_incoming_len = instruction.data; const name = instruction_index.name(&function); if (@intFromEnum(instruction_index) > params_len) try writer.writeByte('\n'); try writer.print("{}:\n", .{name.fmt(self)}); @@ -6076,9 +6115,9 @@ pub fn dump(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator .@"phi fast", => |tag| { var extra = function.extraDataTrail(Function.Instruction.Phi, instruction.data); - const vals = extra.trail.next(extra.data.incoming_len, Value, &function); + const vals = extra.trail.next(block_incoming_len, Value, &function); const blocks = - extra.trail.next(extra.data.incoming_len, Function.Block.Index, &function); + extra.trail.next(block_incoming_len, Function.Block.Index, &function); try writer.print(" %{} = {s} {%} ", .{ instruction_index.name(&function).fmt(self), @tagName(tag), -- cgit v1.2.3