From c3663f2617fd317f458bfa013ea94efb7dbcfee5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 15 Mar 2022 23:32:02 -0700
Subject: LLVM: implement debug info for structs

This involved some significant reworking in order to introduce the
concept of "forward declarations" to the system to break dependency
loops.

The `lowerDebugType` function now takes an `enum { full, fwd }` and is
moved from `DeclGen` to `Object` so that it can be called from
`flushModule`.

`DITypeMap` is now an `ArrayHashMap` instead of a `HashMap` so that we
can iterate over the entries in `flushModule` and finalize the forward
decl DITypes into full DITypes.

`DITypeMap` now stores `AnnotatedDITypePtr` values instead of `*DIType`
values. This is an abstraction around a `usize` which assumes the
pointers are at least 2-byte aligned and uses the least significant bit
to store whether it is a forward decl or a fully resolved debug info
type (see the sketch below).

`lowerDebugTypeImpl` is extracted out from `lowerDebugType`; it has a
mechanism for completing a forward decl DIType into a fully resolved
one. The function now contains lowering for struct types.

Closes #11095.

There is a workaround for struct types which have not had
`resolveFieldTypes` called in Sema, even by the time `flushModule` is
called. This is a deficiency of Sema that should be addressed, and the
workaround removed. I think Sema needs a new mechanism to queue up type
resolution work instead of doing it in-line, so that it does not cause
false dependency loops. We already have one failing behavior test
because of a false dependency loop.
---
 src/codegen/llvm.zig | 3681 ++++++++++++++++++++++++++------------------------
 1 file changed, 1908 insertions(+), 1773 deletions(-)

diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 186109d471..15de638737 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -160,6 +160,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
 }
 
 pub const Object = struct {
+    gpa: Allocator,
     llvm_module: *const llvm.Module,
     di_builder: ?*llvm.DIBuilder,
     /// One of these mappings:
@@ -171,6 +172,7 @@ pub const Object = struct {
     context: *const llvm.Context,
     target_machine: *const llvm.TargetMachine,
     target_data: *const llvm.TargetData,
+    target: std.Target,
     /// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
     /// but that has some downsides:
     /// * we have to compute the fully qualified name every time we want to do the lookup
@@ -202,11 +204,13 @@
         std.hash_map.default_max_load_percentage,
     );
 
-    pub const DITypeMap = std.HashMapUnmanaged(
+    /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
+    /// want to iterate over it while adding entries to it.
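
A sketch of the `AnnotatedDITypePtr` idea from the commit message; the exact
implementation is not visible in the hunks shown here, so the details below
are illustrative (names follow the commit message; `assert` and `llvm` are
the file-scope declarations of llvm.zig):

    const AnnotatedDITypePtr = enum(usize) {
        _,

        fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
            return @intToEnum(AnnotatedDITypePtr, @ptrToInt(di_type));
        }

        fn initFwd(fwd_decl: *llvm.DIType) AnnotatedDITypePtr {
            const addr = @ptrToInt(fwd_decl);
            assert(addr & 1 == 0); // the 2-byte alignment assumption
            return @intToEnum(AnnotatedDITypePtr, addr | 1);
        }

        fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
            return @intToPtr(*llvm.DIType, @enumToInt(self) & ~@as(usize, 1));
        }

        fn isFwdOnly(self: AnnotatedDITypePtr) bool {
            return @enumToInt(self) & 1 != 0;
        }
    };
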
+ pub const DITypeMap = std.ArrayHashMapUnmanaged( Type, - *llvm.DIType, - Type.HashContext64, - std.hash_map.default_max_load_percentage, + AnnotatedDITypePtr, + Type.HashContext32, + true, ); pub fn create(gpa: Allocator, options: link.Options) !*Object { @@ -335,6 +339,7 @@ pub const Object = struct { llvm_module.setModuleDataLayout(target_data); return Object{ + .gpa = gpa, .llvm_module = llvm_module, .di_map = .{}, .di_builder = opt_di_builder, @@ -342,6 +347,7 @@ pub const Object = struct { .context = context, .target_machine = target_machine, .target_data = target_data, + .target = options.target, .decl_map = .{}, .type_map = .{}, .type_map_arena = std.heap.ArenaAllocator.init(gpa), @@ -437,7 +443,25 @@ pub const Object = struct { pub fn flushModule(self: *Object, comp: *Compilation) !void { try self.genErrorNameTable(comp); - if (self.di_builder) |dib| dib.finalize(); + if (self.di_builder) |dib| { + // When lowering debug info for pointers, we emitted the element types as + // forward decls. Now we must go flesh those out. + // Here we iterate over a hash map while modifying it but it is OK because + // we never add or remove entries during this loop. + var i: usize = 0; + while (i < self.di_type_map.count()) : (i += 1) { + const value_ptr = &self.di_type_map.values()[i]; + const annotated = value_ptr.*; + if (!annotated.isFwdOnly()) continue; + const entry: Object.DITypeMap.Entry = .{ + .key_ptr = &self.di_type_map.keys()[i], + .value_ptr = value_ptr, + }; + _ = try self.lowerDebugTypeImpl(entry, .full, annotated.toDIType()); + } + + dib.finalize(); + } if (comp.verbose_llvm_ir) { self.llvm_module.dump(); @@ -503,7 +527,7 @@ pub const Object = struct { } pub fn updateFunc( - self: *Object, + o: *Object, module: *Module, func: *Module.Fn, air: Air, @@ -512,8 +536,8 @@ pub const Object = struct { const decl = func.owner_decl; var dg: DeclGen = .{ - .context = self.context, - .object = self, + .context = o.context, + .object = o, .module = module, .decl = decl, .err_msg = null, @@ -584,7 +608,7 @@ pub const Object = struct { llvm_func.getValueName(), di_file.?, line_number, - try dg.lowerDebugType(decl.ty), + try o.lowerDebugType(decl.ty, .full), is_internal_linkage, true, // is definition line_number + func.lbrace_line, // scope line @@ -631,7 +655,7 @@ pub const Object = struct { }; const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); + try o.updateDeclExports(module, decl, decl_exports); } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { @@ -773,622 +797,811 @@ pub const Object = struct { gop.value_ptr.* = di_file.toNode(); return di_file; } -}; -pub const DeclGen = struct { - context: *const llvm.Context, - object: *Object, - module: *Module, - decl: *Module.Decl, - gpa: Allocator, - err_msg: ?*Module.ErrorMsg, + const DebugResolveStatus = enum { fwd, full }; - fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { - @setCold(true); - assert(self.err_msg == null); - const src_loc = @as(LazySrcLoc, .{ .node_offset = 0 }).toSrcLoc(self.decl); - self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args); - return error.CodegenFail; - } + /// In the implementation of this function, it is required to store a forward decl + /// into `gop` before making any recursive calls (even directly). 
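
The dependency loops this machinery breaks come from self-referential types:
lowering debug info for a struct like the one below needs the type of `next`,
whose pointee is the struct itself. The pointee is therefore requested with
`.fwd`, and the placeholder is completed later (in `flushModule` at the
latest). A minimal illustration, not code from this commit:

    const Node = struct {
        value: u32,
        next: ?*Node,
    };
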
+ fn lowerDebugType( + o: *Object, + ty: Type, + resolve: DebugResolveStatus, + ) Allocator.Error!*llvm.DIType { + const gpa = o.gpa; + // Be careful not to reference this `gop` variable after any recursive calls + // to `lowerDebugType`. + const gop = try o.di_type_map.getOrPut(gpa, ty); + if (gop.found_existing) { + const annotated = gop.value_ptr.*; + const di_type = annotated.toDIType(); + if (!annotated.isFwdOnly() or resolve == .fwd) { + return di_type; + } + const entry: Object.DITypeMap.Entry = .{ + .key_ptr = gop.key_ptr, + .value_ptr = gop.value_ptr, + }; + return o.lowerDebugTypeImpl(entry, resolve, di_type); + } + errdefer assert(o.di_type_map.orderedRemove(ty)); + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator()); + const entry: Object.DITypeMap.Entry = .{ + .key_ptr = gop.key_ptr, + .value_ptr = gop.value_ptr, + }; + return o.lowerDebugTypeImpl(entry, resolve, null); + } + + /// This is a helper function used by `lowerDebugType`. + fn lowerDebugTypeImpl( + o: *Object, + gop: Object.DITypeMap.Entry, + resolve: DebugResolveStatus, + opt_fwd_decl: ?*llvm.DIType, + ) Allocator.Error!*llvm.DIType { + const ty = gop.key_ptr.*; + const gpa = o.gpa; + const target = o.target; + const dib = o.di_builder.?; + switch (ty.zigTypeTag()) { + .Void, .NoReturn => { + const di_type = dib.createBasicType("void", 0, DW.ATE.signed); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + }, + .Int => { + const info = ty.intInfo(target); + assert(info.bits != 0); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const dwarf_encoding: c_uint = switch (info.signedness) { + .signed => DW.ATE.signed, + .unsigned => DW.ATE.unsigned, + }; + const di_type = dib.createBasicType(name, info.bits, dwarf_encoding); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + }, + .Enum => { + const owner_decl = ty.getOwnerDecl(); - fn llvmModule(self: *DeclGen) *const llvm.Module { - return self.object.llvm_module; - } + if (!ty.hasRuntimeBitsIgnoreComptime()) { + const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. 
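
The "can't use `gop` anymore" comments throughout this function follow from
`ArrayHashMap` semantics: `getOrPut` returns pointers into the map's backing
storage, and any subsequent insertion may reallocate that storage. A
standalone illustration of the safe pattern (hypothetical test, not part of
this patch):

    const std = @import("std");

    test "getOrPut pointers do not survive later insertions" {
        var map = std.AutoArrayHashMap(u32, u32).init(std.testing.allocator);
        defer map.deinit();

        const gop = try map.getOrPut(1);
        gop.value_ptr.* = 0;

        // This insertion may grow the backing array, invalidating
        // `gop.value_ptr` and `gop.key_ptr`.
        try map.put(2, 20);

        // So after any further mutation, store by key again instead of
        // writing through the saved entry pointer.
        try map.put(1, 10);
        try std.testing.expectEqual(@as(u32, 10), map.get(1).?);
    }
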
+ try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty)); + return enum_di_ty; + } - fn genDecl(dg: *DeclGen) !void { - const decl = dg.decl; - assert(decl.has_tv); + const field_names = ty.enumFields().keys(); - log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val }); + const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len); + defer gpa.free(enumerators); - if (decl.val.castTag(.function)) |func_payload| { - _ = func_payload; - @panic("TODO llvm backend genDecl function pointer"); - } else if (decl.val.castTag(.extern_fn)) |extern_fn| { - _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); - } else { - const target = dg.module.getTarget(); - var global = try dg.resolveGlobalDecl(decl); - global.setAlignment(decl.getAlignment(target)); - assert(decl.has_tv); - const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { - const variable = payload.data; - break :init_val variable.init; - } else init_val: { - global.setGlobalConstant(.True); - break :init_val decl.val; - }; - if (init_val.tag() != .unreachable_value) { - const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val }); - if (global.globalGetValueType() == llvm_init.typeOf()) { - global.setInitializer(llvm_init); - } else { - // LLVM does not allow us to change the type of globals. So we must - // create a new global with the correct type, copy all its attributes, - // and then update all references to point to the new global, - // delete the original, and rename the new one to the old one's name. - // This is necessary because LLVM does not support const bitcasting - // a struct with padding bytes, which is needed to lower a const union value - // to LLVM, when a field other than the most-aligned is active. Instead, - // we must lower to an unnamed struct, and pointer cast at usage sites - // of the global. Such an unnamed struct is the cause of the global type - // mismatch, because we don't have the LLVM type until the *value* is created, - // whereas the global needs to be created based on the type alone, because - // lowering the value may reference the global as a pointer. - const new_global = dg.object.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(), - "", - dg.llvmAddressSpace(decl.@"addrspace"), - ); - new_global.setLinkage(global.getLinkage()); - new_global.setUnnamedAddr(global.getUnnamedAddress()); - new_global.setAlignment(global.getAlignment()); - new_global.setInitializer(llvm_init); - // replaceAllUsesWith requires the type to be unchanged. So we bitcast - // the new global to the old type and use that as the thing to replace - // old uses. 
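
For context on the enumerator loop above: each `DIEnumerator` records a
field's integer tag value, and tag values need not be dense, sequential, or
non-negative (hence `toSignedInt` and the #645 reference). Illustration only:

    const std = @import("std");

    const Color = enum(i8) {
        red = -1,
        green = 2,
        blue = 3,
    };

    test "enum tag values are what the enumerators record" {
        try std.testing.expectEqual(@as(i8, -1), @enumToInt(Color.red));
        try std.testing.expectEqual(@as(i8, 2), @enumToInt(Color.green));
    }
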
- const new_global_ptr = new_global.constBitCast(global.typeOf()); - global.replaceAllUsesWith(new_global_ptr); - dg.object.decl_map.putAssumeCapacity(decl, new_global); - new_global.takeName(global); - global.deleteGlobal(); - global = new_global; + var buf_field_index: Value.Payload.U32 = .{ + .base = .{ .tag = .enum_field_index }, + .data = undefined, + }; + const field_index_val = Value.initPayload(&buf_field_index.base); + + for (field_names) |field_name, i| { + const field_name_z = try gpa.dupeZ(u8, field_name); + defer gpa.free(field_name_z); + + buf_field_index.data = @intCast(u32, i); + var buf_u64: Value.Payload.U64 = undefined; + const field_int_val = field_index_val.enumToInt(ty, &buf_u64); + // See https://github.com/ziglang/zig/issues/645 + const field_int = field_int_val.toSignedInt(); + enumerators[i] = dib.createEnumerator(field_name_z, field_int); } - } - if (dg.object.di_builder) |dib| { - const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); + const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope); + const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace); - const line_number = decl.src_line + 1; - const is_internal_linkage = !dg.module.decl_exports.contains(decl); - const di_global = dib.createGlobalVariable( - di_file.toScope(), - decl.name, - global.getValueName(), + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + var buffer: Type.Payload.Bits = undefined; + const int_ty = ty.intTagType(&buffer); + + const enum_di_ty = dib.createEnumerationType( + di_scope, + name, di_file, - line_number, - try dg.lowerDebugType(decl.ty), - is_internal_linkage, + owner_decl.src_node + 1, + ty.abiSize(target) * 8, + ty.abiAlignment(target) * 8, + enumerators.ptr, + @intCast(c_int, enumerators.len), + try o.lowerDebugType(int_ty, .full), + "", ); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty)); + return enum_di_ty; + }, + .Float => { + const bits = ty.floatBits(target); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const di_type = dib.createBasicType(name, bits, DW.ATE.float); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + }, + .Bool => { + const di_type = dib.createBasicType("bool", 1, DW.ATE.boolean); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + }, + .Pointer => { + // Normalize everything that the debug info does not represent. 
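
The qualifiers stripped by the normalization below are invisible at the
machine level, which is why many distinct pointer types can share one debug
type. An illustration (hypothetical test):

    const std = @import("std");

    test "pointer qualifiers do not change the runtime representation" {
        try std.testing.expect(@sizeOf(*u8) == @sizeOf(*const u8));
        try std.testing.expect(@sizeOf(*u8) == @sizeOf([*]volatile u8));
        try std.testing.expect(@sizeOf(*u8) == @sizeOf([*c]u8));
        try std.testing.expect(@sizeOf(*u8) == @sizeOf(*allowzero u8));
    }
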
+ const ptr_info = ty.ptrInfo().data; - try dg.object.di_map.put(dg.gpa, dg.decl, di_global.toNode()); - } - } - } + if (ptr_info.sentinel != null or + ptr_info.@"addrspace" != .generic or + ptr_info.bit_offset != 0 or + ptr_info.host_size != 0 or + ptr_info.@"allowzero" or + !ptr_info.mutable or + ptr_info.@"volatile" or + ptr_info.size == .Many or ptr_info.size == .C or + !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) + { + var payload: Type.Payload.Pointer = .{ + .data = .{ + .pointee_type = ptr_info.pointee_type, + .sentinel = null, + .@"align" = ptr_info.@"align", + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = switch (ptr_info.size) { + .Many, .C, .One => .One, + .Slice => .Slice, + }, + }, + }; + if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) { + payload.data.pointee_type = Type.anyopaque; + } + const bland_ptr_ty = Type.initPayload(&payload.base); + const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty)); + return ptr_di_ty; + } - /// If the llvm function does not exist, create it. - /// Note that this can be called before the function's semantic analysis has - /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. - fn resolveLlvmFunction(dg: *DeclGen, decl: *Module.Decl) !*const llvm.Value { - const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); - if (gop.found_existing) return gop.value_ptr.*; + if (ty.isSlice()) { + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_ty = ty.slicePtrFieldType(&buf); + const len_ty = Type.usize; - assert(decl.has_tv); - const zig_fn_type = decl.ty; - const fn_info = zig_fn_type.fnInfo(); - const target = dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const di_file: ?*llvm.DIFile = null; + const line = 0; + const compile_unit_scope = o.di_compile_unit.?.toScope(); - const fn_type = try dg.llvmType(zig_fn_type); + const fwd_decl = opt_fwd_decl orelse blk: { + const fwd_decl = dib.createReplaceableCompositeType( + DW.TAG.structure_type, + name.ptr, + compile_unit_scope, + di_file, + line, + ); + gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl); + if (resolve == .fwd) return fwd_decl; + break :blk fwd_decl; + }; - const fqn = try decl.getFullyQualifiedName(dg.gpa); - defer dg.gpa.free(fqn); + const ptr_size = ptr_ty.abiSize(target); + const ptr_align = ptr_ty.abiAlignment(target); + const len_size = len_ty.abiSize(target); + const len_align = len_ty.abiAlignment(target); - const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); - const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); - gop.value_ptr.* = llvm_fn; + var offset: u64 = 0; + offset += ptr_size; + offset = std.mem.alignForwardGeneric(u64, offset, len_align); + const len_offset = offset; - const is_extern = decl.isExtern(); - if (!is_extern) { - llvm_fn.setLinkage(.Internal); - llvm_fn.setUnnamedAddr(.True); - } else if (dg.module.getTarget().isWasm()) { - dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (decl.getExternFn().?.lib_name) |lib_name| { - const module_name = std.mem.sliceTo(lib_name, 0); - if (!std.mem.eql(u8, module_name, "c")) { - dg.addFnAttrString(llvm_fn, "wasm-import-module", 
module_name); + const fields: [2]*llvm.DIType = .{ + dib.createMemberType( + fwd_decl.toScope(), + "ptr", + di_file, + line, + ptr_size * 8, // size in bits + ptr_align * 8, // align in bits + 0, // offset in bits + 0, // flags + try o.lowerDebugType(ptr_ty, .full), + ), + dib.createMemberType( + fwd_decl.toScope(), + "len", + di_file, + line, + len_size * 8, // size in bits + len_align * 8, // align in bits + len_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(len_ty, .full), + ), + }; + + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, + di_file, + line, + ty.abiSize(target) * 8, // size in bits + ty.abiAlignment(target) * 8, // align in bits + 0, // flags + null, // derived from + &fields, + fields.len, + 0, // run time lang + null, // vtable holder + "", // unique id + ); + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty)); + return full_di_ty; } - } - } - if (sret) { - dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 - dg.addArgAttr(llvm_fn, 0, "noalias"); + const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const ptr_di_ty = dib.createPointerType( + elem_di_ty, + target.cpu.arch.ptrBitWidth(), + ty.ptrAlignment(target) * 8, + name, + ); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty)); + return ptr_di_ty; + }, + .Opaque => { + if (ty.tag() == .anyopaque) { + const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); + return di_ty; + } + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const owner_decl = ty.getOwnerDecl(); + const opaque_di_ty = dib.createForwardDeclType( + DW.TAG.structure_type, + name, + try o.namespaceToDebugScope(owner_decl.src_namespace), + try o.getDIFile(gpa, owner_decl.src_namespace.file_scope), + owner_decl.src_node + 1, + ); + // The recursive call to `lowerDebugType` va `namespaceToDebugScope` + // means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty)); + return opaque_di_ty; + }, + .Array => { + const array_di_ty = dib.createArrayType( + ty.abiSize(target) * 8, + ty.abiAlignment(target) * 8, + try o.lowerDebugType(ty.childType(), .full), + @intCast(c_int, ty.arrayLen()), + ); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty)); + return array_di_ty; + }, + .Vector => { + const vector_di_ty = dib.createVectorType( + ty.abiSize(target) * 8, + ty.abiAlignment(target) * 8, + try o.lowerDebugType(ty.childType(), .full), + ty.vectorLen(), + ); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
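
The member offsets in the slice case above, and in the optional and error
union cases below, all come from the same walk: advance by the previous
field's size, then align up for the next field. A standalone version of the
`len` offset computation, assuming 8-byte pointers and `usize` as on x86_64:

    const std = @import("std");

    test "offset of `len` in a { ptr, len } slice pair" {
        const ptr_size: u64 = 8;
        const len_align: u64 = 8;
        var offset: u64 = 0;
        offset += ptr_size;
        offset = std.mem.alignForwardGeneric(u64, offset, len_align);
        try std.testing.expectEqual(@as(u64, 8), offset);
    }
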
+ try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty)); + return vector_di_ty; + }, + .Optional => { + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + var buf: Type.Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + const di_ty = dib.createBasicType(name, 1, DW.ATE.boolean); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); + return di_ty; + } + if (ty.isPtrLikeOptional()) { + const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty)); + return ptr_di_ty; + } - const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type); - llvm_fn.addSretAttr(0, raw_llvm_ret_ty); - } + const di_file: ?*llvm.DIFile = null; + const line = 0; + const compile_unit_scope = o.di_compile_unit.?.toScope(); + const fwd_decl = opt_fwd_decl orelse blk: { + const fwd_decl = dib.createReplaceableCompositeType( + DW.TAG.structure_type, + name.ptr, + compile_unit_scope, + di_file, + line, + ); + gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl); + if (resolve == .fwd) return fwd_decl; + break :blk fwd_decl; + }; - // Set parameter attributes. - var llvm_param_i: c_uint = @boolToInt(sret); - for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + const non_null_ty = Type.bool; + const payload_size = child_ty.abiSize(target); + const payload_align = child_ty.abiAlignment(target); + const non_null_size = non_null_ty.abiSize(target); + const non_null_align = non_null_ty.abiAlignment(target); - if (isByRef(param_ty)) { - dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull"); - // TODO readonly, noalias, align - } - llvm_param_i += 1; - } + var offset: u64 = 0; + offset += payload_size; + offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); + const non_null_offset = offset; - // TODO: more attributes. see codegen.cpp `make_fn_llvm_value`. - if (fn_info.cc == .Naked) { - dg.addFnAttr(llvm_fn, "naked"); - } else { - llvm_fn.setFunctionCallConv(toLlvmCallConv(fn_info.cc, target)); - } + const fields: [2]*llvm.DIType = .{ + dib.createMemberType( + fwd_decl.toScope(), + "data", + di_file, + line, + payload_size * 8, // size in bits + payload_align * 8, // align in bits + 0, // offset in bits + 0, // flags + try o.lowerDebugType(child_ty, .full), + ), + dib.createMemberType( + fwd_decl.toScope(), + "some", + di_file, + line, + non_null_size * 8, // size in bits + non_null_align * 8, // align in bits + non_null_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(non_null_ty, .full), + ), + }; - if (fn_info.alignment != 0) { - llvm_fn.setAlignment(fn_info.alignment); - } + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, + di_file, + line, + ty.abiSize(target) * 8, // size in bits + ty.abiAlignment(target) * 8, // align in bits + 0, // flags + null, // derived from + &fields, + fields.len, + 0, // run time lang + null, // vtable holder + "", // unique id + ); + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
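
The three-way split in the optional case above matches the runtime
representation: a zero-bit payload leaves only the `bool`, a pointer-like
optional reuses the null address as its `none` state, and anything else
carries an explicit `some` flag next to the payload. Illustration:

    const std = @import("std");

    test "optional representations" {
        // Pointer-like: null is the spare value, no extra flag needed.
        try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
        // Non-pointer payload: an explicit flag makes the optional bigger.
        try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
        // Zero-bit payload: only the flag remains.
        try std.testing.expect(@sizeOf(?void) == @sizeOf(bool));
    }
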
+ try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty)); + return full_di_ty; + }, + .ErrorUnion => { + const err_set_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty)); + return err_set_di_ty; + } + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); + const di_file: ?*llvm.DIFile = null; + const line = 0; + const compile_unit_scope = o.di_compile_unit.?.toScope(); + const fwd_decl = opt_fwd_decl orelse blk: { + const fwd_decl = dib.createReplaceableCompositeType( + DW.TAG.structure_type, + name.ptr, + compile_unit_scope, + di_file, + line, + ); + gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl); + if (resolve == .fwd) return fwd_decl; + break :blk fwd_decl; + }; - // Function attributes that are independent of analysis results of the function body. - dg.addCommonFnAttributes(llvm_fn); + const err_set_size = err_set_ty.abiSize(target); + const err_set_align = err_set_ty.abiAlignment(target); + const payload_size = payload_ty.abiSize(target); + const payload_align = payload_ty.abiAlignment(target); - if (fn_info.return_type.isNoReturn()) { - dg.addFnAttr(llvm_fn, "noreturn"); - } + var offset: u64 = 0; + offset += err_set_size; + offset = std.mem.alignForwardGeneric(u64, offset, payload_align); + const payload_offset = offset; - return llvm_fn; - } + const fields: [2]*llvm.DIType = .{ + dib.createMemberType( + fwd_decl.toScope(), + "tag", + di_file, + line, + err_set_size * 8, // size in bits + err_set_align * 8, // align in bits + 0, // offset in bits + 0, // flags + try o.lowerDebugType(err_set_ty, .full), + ), + dib.createMemberType( + fwd_decl.toScope(), + "value", + di_file, + line, + payload_size * 8, // size in bits + payload_align * 8, // align in bits + payload_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(payload_ty, .full), + ), + }; - fn addCommonFnAttributes(dg: *DeclGen, llvm_fn: *const llvm.Value) void { - if (!dg.module.comp.bin_file.options.red_zone) { - dg.addFnAttr(llvm_fn, "noredzone"); - } - if (dg.module.comp.bin_file.options.omit_frame_pointer) { - dg.addFnAttrString(llvm_fn, "frame-pointer", "none"); - } else { - dg.addFnAttrString(llvm_fn, "frame-pointer", "all"); - } - dg.addFnAttr(llvm_fn, "nounwind"); - if (dg.module.comp.unwind_tables) { - dg.addFnAttr(llvm_fn, "uwtable"); - } - if (dg.module.comp.bin_file.options.skip_linker_dependencies) { - // The intent here is for compiler-rt and libc functions to not generate - // infinite recursion. For example, if we are compiling the memcpy function, - // and llvm detects that the body is equivalent to memcpy, it may replace the - // body of memcpy with a call to memcpy, which would then cause a stack - // overflow instead of performing memcpy. 
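
Concretely, the kind of routine that comment is about (a hypothetical
compiler-rt-style memcpy, not code from this patch): without `nobuiltin`,
LLVM's idiom recognition may rewrite the loop as a call to `memcpy`, i.e. the
function itself.

    export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) ?[*]u8 {
        var i: usize = 0;
        while (i < len) : (i += 1) {
            dest.?[i] = src.?[i];
        }
        return dest;
    }
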
- dg.addFnAttr(llvm_fn, "nobuiltin"); - } - if (dg.module.comp.bin_file.options.optimize_mode == .ReleaseSmall) { - dg.addFnAttr(llvm_fn, "minsize"); - dg.addFnAttr(llvm_fn, "optsize"); - } - if (dg.module.comp.bin_file.options.tsan) { - dg.addFnAttr(llvm_fn, "sanitize_thread"); - } - // TODO add target-cpu and target-features fn attributes - } - - fn resolveGlobalDecl(dg: *DeclGen, decl: *Module.Decl) Error!*const llvm.Value { - const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); - if (gop.found_existing) return gop.value_ptr.*; - errdefer assert(dg.object.decl_map.remove(decl)); - - const fqn = try decl.getFullyQualifiedName(dg.gpa); - defer dg.gpa.free(fqn); - - const llvm_type = try dg.llvmType(decl.ty); - const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); - const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace); - gop.value_ptr.* = llvm_global; - - // This is needed for declarations created by `@extern`. - if (decl.isExtern()) { - llvm_global.setValueName(decl.name); - llvm_global.setUnnamedAddr(.False); - llvm_global.setLinkage(.External); - if (decl.val.castTag(.variable)) |variable| { - const single_threaded = dg.module.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { - llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); - } else { - llvm_global.setThreadLocalMode(.NotThreadLocal); - } - if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); - } - } else { - llvm_global.setLinkage(.Internal); - llvm_global.setUnnamedAddr(.True); - } - - return llvm_global; - } - - fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint { - const target = self.module.getTarget(); - return switch (target.cpu.arch) { - .i386, .x86_64 => switch (address_space) { - .generic => llvm.address_space.default, - .gs => llvm.address_space.x86.gs, - .fs => llvm.address_space.x86.fs, - .ss => llvm.address_space.x86.ss, - else => unreachable, - }, - .nvptx, .nvptx64 => switch (address_space) { - .generic => llvm.address_space.default, - .global => llvm.address_space.nvptx.global, - .constant => llvm.address_space.nvptx.constant, - .param => llvm.address_space.nvptx.param, - .shared => llvm.address_space.nvptx.shared, - .local => llvm.address_space.nvptx.local, - else => unreachable, - }, - else => switch (address_space) { - .generic => llvm.address_space.default, - else => unreachable, - }, - }; - } - - fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool { - // Once `llvmType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `llvmType` fails here it means - // it is the first time lowering the type, which means the value can't possible - // have that type. 
- const llvm_ty = dg.llvmType(ty) catch return true; - return val.typeOf() != llvm_ty; - } - - fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { - const gpa = dg.gpa; - const target = dg.module.getTarget(); - switch (t.zigTypeTag()) { - .Void, .NoReturn => return dg.context.voidType(), - .Int => { - const info = t.intInfo(target); - assert(info.bits != 0); - return dg.context.intType(info.bits); - }, - .Enum => { - var buffer: Type.Payload.Bits = undefined; - const int_ty = t.intTagType(&buffer); - const bit_count = int_ty.intInfo(target).bits; - assert(bit_count != 0); - return dg.context.intType(bit_count); - }, - .Float => switch (t.floatBits(target)) { - 16 => return dg.context.halfType(), - 32 => return dg.context.floatType(), - 64 => return dg.context.doubleType(), - 80 => return if (backendSupportsF80(target)) dg.context.x86FP80Type() else dg.context.intType(80), - 128 => return dg.context.fp128Type(), - else => unreachable, + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, + di_file, + line, + ty.abiSize(target) * 8, // size in bits + ty.abiAlignment(target) * 8, // align in bits + 0, // flags + null, // derived from + &fields, + fields.len, + 0, // run time lang + null, // vtable holder + "", // unique id + ); + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty)); + return full_di_ty; }, - .Bool => return dg.context.intType(1), - .Pointer => { - if (t.isSlice()) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf); - - const fields: [2]*const llvm.Type = .{ - try dg.llvmType(ptr_type), - try dg.llvmType(Type.usize), - }; - return dg.context.structType(&fields, fields.len, .False); - } - const ptr_info = t.ptrInfo().data; - const llvm_addrspace = dg.llvmAddressSpace(ptr_info.@"addrspace"); - if (ptr_info.host_size != 0) { - return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace); - } - const elem_ty = ptr_info.pointee_type; - const lower_elem_ty = switch (elem_ty.zigTypeTag()) { - .Opaque, .Fn => true, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(), - else => elem_ty.hasRuntimeBitsIgnoreComptime(), - }; - const llvm_elem_ty = if (lower_elem_ty) - try dg.llvmType(elem_ty) - else - dg.context.intType(8); - return llvm_elem_ty.pointerType(llvm_addrspace); + .ErrorSet => { + // TODO make this a proper enum with all the error codes in it. + // will need to consider how to take incremental compilation into account. + const di_ty = dib.createBasicType("anyerror", 16, DW.ATE.unsigned); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); + return di_ty; }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const gop = try dg.object.type_map.getOrPut(gpa, t); - if (gop.found_existing) return gop.value_ptr.*; - - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(gpa); - defer gpa.free(name); + .Struct => { + const compile_unit_scope = o.di_compile_unit.?.toScope(); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .anyopaque => return dg.context.intType(8), - else => unreachable, - }, - .Array => { - const elem_ty = t.childType(); - assert(elem_ty.onePossibleValue() == null); - const elem_llvm_ty = try dg.llvmType(elem_ty); - const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); - return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); - }, - .Vector => { - const elem_type = try dg.llvmType(t.childType()); - return elem_type.vectorType(t.vectorLen()); - }, - .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { - return dg.context.intType(1); - } - const payload_llvm_ty = try dg.llvmType(child_ty); - if (t.isPtrLikeOptional()) { - return payload_llvm_ty; + if (ty.castTag(.@"struct")) |payload| { + const struct_obj = payload.data; + if (struct_obj.layout == .Packed) { + var buf: Type.Payload.Bits = undefined; + const info = struct_obj.packedIntegerType(target, &buf).intInfo(target); + const dwarf_encoding: c_uint = switch (info.signedness) { + .signed => DW.ATE.signed, + .unsigned => DW.ATE.unsigned, + }; + const di_ty = dib.createBasicType(name, info.bits, dwarf_encoding); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); + return di_ty; + } } - const fields: [2]*const llvm.Type = .{ - payload_llvm_ty, dg.context.intType(1), + const fwd_decl = opt_fwd_decl orelse blk: { + const fwd_decl = dib.createReplaceableCompositeType( + DW.TAG.structure_type, + name.ptr, + compile_unit_scope, + null, // file + 0, // line + ); + gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl); + if (resolve == .fwd) return fwd_decl; + break :blk fwd_decl; }; - return dg.context.structType(&fields, fields.len, .False); - }, - .ErrorUnion => { - const error_type = t.errorUnionSet(); - const payload_type = t.errorUnionPayload(); - const llvm_error_type = try dg.llvmType(error_type); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { - return llvm_error_type; - } - const llvm_payload_type = try dg.llvmType(payload_type); - - const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; - return dg.context.structType(&fields, fields.len, .False); - }, - .ErrorSet => { - return dg.context.intType(16); - }, - .Struct => { - const gop = try dg.object.type_map.getOrPut(gpa, t); - if (gop.found_existing) return gop.value_ptr.*; - - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - if (t.isTupleOrAnonStruct()) { - const tuple = t.tupleFields(); - const llvm_struct_ty = dg.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + if (ty.isTupleOrAnonStruct()) { + const tuple = ty.tupleFields(); - var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; + defer di_fields.deinit(gpa); - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); + try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var big_align: u32 = 0; for (tuple.types) |field_ty, i| { const field_val = tuple.values[i]; if (field_val.tag() != .unreachable_value) continue; + const field_size = field_ty.abiSize(target); const field_align = field_ty.abiAlignment(target); - big_align = @maximum(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try dg.llvmType(field_ty); - try llvm_field_types.append(gpa, field_llvm_ty); + const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = field_offset + field_size; - offset += field_ty.abiSize(target); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } + const field_name = if (ty.castTag(.anon_struct)) |payload| + try gpa.dupeZ(u8, payload.data.names[i]) + else + try std.fmt.allocPrintZ(gpa, "{d}", .{i}); + defer gpa.free(field_name); + + try di_fields.append(gpa, dib.createMemberType( + fwd_decl.toScope(), + field_name, + null, // file + 0, // line + field_size * 8, // size in bits + field_align * 8, // align in bits + field_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(field_ty, .full), + )); } - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), - .False, + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, + null, // file + 0, // line + ty.abiSize(target) * 8, // size in bits + ty.abiAlignment(target) * 8, // align in bits + 0, // flags + null, // derived from + di_fields.items.ptr, + @intCast(c_int, di_fields.items.len), + 0, // run time lang + null, // vtable holder + "", // unique id ); - - return llvm_struct_ty; + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
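
Tuple fields have no source-level names, hence the synthesized "0", "1", ...
member names above; this matches how the language itself names tuple fields.
Illustration (hypothetical test):

    const std = @import("std");

    test "tuple fields are named by index" {
        const Pair = std.meta.Tuple(&.{ u8, u64 });
        const fields = @typeInfo(Pair).Struct.fields;
        try std.testing.expectEqualStrings("0", fields[0].name);
        try std.testing.expectEqualStrings("1", fields[1].name);
    }
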
+ try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty)); + return full_di_ty; } - const struct_obj = t.castTag(.@"struct").?.data; - - if (struct_obj.layout == .Packed) { - var buf: Type.Payload.Bits = undefined; - const int_ty = struct_obj.packedIntegerType(target, &buf); - const int_llvm_ty = try dg.llvmType(int_ty); - gop.value_ptr.* = int_llvm_ty; - return int_llvm_ty; + if (ty.castTag(.@"struct")) |payload| { + const struct_obj = payload.data; + if (!struct_obj.haveFieldTypes()) { + // TODO: improve the frontend to populate this struct. + // For now we treat it as a zero bit type. + const owner_decl = ty.getOwnerDecl(); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + dib.replaceTemporary(fwd_decl, struct_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty)); + return struct_di_ty; + } } - const name = try struct_obj.getFullyQualifiedName(gpa); - defer gpa.free(name); - - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + if (!ty.hasRuntimeBitsIgnoreComptime()) { + const owner_decl = ty.getOwnerDecl(); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + dib.replaceTemporary(fwd_decl, struct_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty)); + return struct_di_ty; + } - assert(struct_obj.haveFieldTypes()); + const fields = ty.structFields(); - var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; + defer di_fields.deinit(gpa); - try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count()); + try di_fields.ensureUnusedCapacity(gpa, fields.count()); comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var big_align: u32 = 0; - for (struct_obj.fields.values()) |field| { + for (fields.values()) |field, i| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; + const field_size = field.ty.abiSize(target); const field_align = field.normalAlignment(target); - big_align = @maximum(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = field_offset + field_size; - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try dg.llvmType(field.ty); - try llvm_field_types.append(gpa, field_llvm_ty); + const field_name = try gpa.dupeZ(u8, fields.keys()[i]); + defer gpa.free(field_name); - offset += field.ty.abiSize(target); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } + try di_fields.append(gpa, dib.createMemberType( + fwd_decl.toScope(), + field_name, + null, // file + 0, // line + field_size * 8, // size in bits + 
field_align * 8, // align in bits + field_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(field.ty, .full), + )); } - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), - .False, + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, + null, // file + 0, // line + ty.abiSize(target) * 8, // size in bits + ty.abiAlignment(target) * 8, // align in bits + 0, // flags + null, // derived from + di_fields.items.ptr, + @intCast(c_int, di_fields.items.len), + 0, // run time lang + null, // vtable holder + "", // unique id ); - - return llvm_struct_ty; + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty)); + return full_di_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPut(gpa, t); - if (gop.found_existing) return gop.value_ptr.*; + const owner_decl = ty.getOwnerDecl(); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + const name = try ty.nameAlloc(gpa); + defer gpa.free(name); - const layout = t.unionGetLayout(target); - const union_obj = t.cast(Type.Payload.Union).?.data; + const fwd_decl = opt_fwd_decl orelse blk: { + const fwd_decl = dib.createReplaceableCompositeType( + DW.TAG.structure_type, + name.ptr, + o.di_compile_unit.?.toScope(), + null, // file + 0, // line + ); + gop.value_ptr.* = AnnotatedDITypePtr.initFwd(fwd_decl); + if (resolve == .fwd) return fwd_decl; + break :blk fwd_decl; + }; - if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); - gop.value_ptr.* = enum_tag_llvm_ty; - return enum_tag_llvm_ty; + const TODO_implement_this = true; // TODO + if (TODO_implement_this or !ty.hasRuntimeBitsIgnoreComptime()) { + const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl); + dib.replaceTemporary(fwd_decl, union_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty)); + return union_di_ty; } - const name = try union_obj.getFullyQualifiedName(gpa); - defer gpa.free(name); + @panic("TODO debug info type for union"); + //const gop = try o.type_map.getOrPut(gpa, ty); + //if (gop.found_existing) return gop.value_ptr.*; - const llvm_union_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls + //// The Type memory is ephemeral; since we want to store a longer-lived + //// reference, we need to copy it here. 
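
The struct member offsets above are computed by walking fields in order and
aligning each one. For an extern struct the same arithmetic reproduces C
layout, so it can be cross-checked against `@offsetOf` (hypothetical test):

    const std = @import("std");

    test "align-forward walk reproduces extern struct offsets" {
        const S = extern struct {
            a: u8,
            b: u64,
            c: u16,
        };
        var offset: u64 = 0;
        try std.testing.expectEqual(@as(u64, @offsetOf(S, "a")), offset);
        offset += @sizeOf(u8);
        offset = std.mem.alignForwardGeneric(u64, offset, @alignOf(u64));
        try std.testing.expectEqual(@as(u64, @offsetOf(S, "b")), offset);
        offset += @sizeOf(u64);
        offset = std.mem.alignForwardGeneric(u64, offset, @alignOf(u16));
        try std.testing.expectEqual(@as(u64, @offsetOf(S, "c")), offset);
    }
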
+ //gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator()); - const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); + //const layout = ty.unionGetLayout(target); + //const union_obj = ty.cast(Type.Payload.Union).?.data; - const llvm_payload_ty = t: { - if (layout.most_aligned_field_size == layout.payload_size) { - break :t llvm_aligned_field_ty; - } - const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); - const fields: [2]*const llvm.Type = .{ - llvm_aligned_field_ty, - dg.context.intType(8).arrayType(padding_len), - }; - break :t dg.context.structType(&fields, fields.len, .True); - }; + //if (layout.payload_size == 0) { + // const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + // gop.value_ptr.* = enum_tag_llvm_ty; + // return enum_tag_llvm_ty; + //} - if (layout.tag_size == 0) { - var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty}; - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); - return llvm_union_ty; - } - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + //const name = try union_obj.getFullyQualifiedName(gpa); + //defer gpa.free(name); - // Put the tag before or after the payload depending on which one's - // alignment is greater. - var llvm_fields: [3]*const llvm.Type = undefined; - var llvm_fields_len: c_uint = 2; + //const llvm_union_ty = dg.context.structCreateNamed(name); + //gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls - if (layout.tag_align >= layout.payload_align) { - llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; - } else { - llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; - } + //const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; + //const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); - // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. - if (layout.padding != 0) { - llvm_fields[2] = dg.context.intType(8).arrayType(layout.padding); - llvm_fields_len = 3; - } + //const llvm_payload_ty = ty: { + // if (layout.most_aligned_field_size == layout.payload_size) { + // break :ty llvm_aligned_field_ty; + // } + // const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); + // const fields: [2]*const llvm.Type = .{ + // llvm_aligned_field_ty, + // dg.context.intType(8).arrayType(padding_len), + // }; + // break :ty dg.context.structType(&fields, fields.len, .True); + //}; - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); - return llvm_union_ty; + //if (layout.tag_size == 0) { + // var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty}; + // llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); + // return llvm_union_ty; + //} + //const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + + //// Put the tag before or after the payload depending on which one's + //// alignment is greater. + //var llvm_fields: [3]*const llvm.Type = undefined; + //var llvm_fields_len: c_uint = 2; + + //if (layout.tag_align >= layout.payload_align) { + // llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; + //} else { + // llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; + //} + + //// Insert padding to make the LLVM struct ABI size match the Zig union ABI size. 
+ //if (layout.padding != 0) { + // llvm_fields[2] = dg.context.intType(8).arrayType(layout.padding); + // llvm_fields_len = 3; + //} + + //llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); + //return llvm_union_ty; }, .Fn => { - const fn_info = t.fnInfo(); + const fn_info = ty.fnInfo(); const sret = firstParamSRet(fn_info, target); - const return_type = fn_info.return_type; - const llvm_sret_ty = if (return_type.hasRuntimeBitsIgnoreComptime()) - try dg.llvmType(return_type) - else - dg.context.voidType(); - const llvm_ret_ty = if (sret) dg.context.voidType() else llvm_sret_ty; - var llvm_params = std.ArrayList(*const llvm.Type).init(dg.gpa); - defer llvm_params.deinit(); + var param_di_types = std.ArrayList(*llvm.DIType).init(gpa); + defer param_di_types.deinit(); - if (sret) { - try llvm_params.append(llvm_sret_ty.pointerType(0)); - } + // Return type goes first. + const di_ret_ty = if (sret or !fn_info.return_type.hasRuntimeBitsIgnoreComptime()) + Type.void + else + fn_info.return_type; + try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); - for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (sret) { + var ptr_ty_payload: Type.Payload.ElemType = .{ + .base = .{ .tag = .single_mut_pointer }, + .data = fn_info.return_type, + }; + const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); + } - const raw_llvm_ty = try dg.llvmType(param_ty); - const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0); - try llvm_params.append(actual_llvm_ty); + for (fn_info.param_types) |param_ty| { + if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + + if (isByRef(param_ty)) { + var ptr_ty_payload: Type.Payload.ElemType = .{ + .base = .{ .tag = .single_mut_pointer }, + .data = param_ty, + }; + const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); + } else { + try param_di_types.append(try o.lowerDebugType(param_ty, .full)); + } } - return llvm.functionType( - llvm_ret_ty, - llvm_params.items.ptr, - @intCast(c_uint, llvm_params.items.len), - llvm.Bool.fromBool(fn_info.is_var_args), + const fn_di_ty = dib.createSubroutineType( + param_di_types.items.ptr, + @intCast(c_int, param_di_types.items.len), + 0, ); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
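
On the `sret` handling in the `.Fn` case above: broadly, when the return
value is passed in memory, the function is lowered as if it returned `void`
and took a leading pointer to the result, which is why the debug signature
records `void` first and then a single-mut-pointer parameter. A rough
illustration (hypothetical functions, not code from this patch):

    const std = @import("std");

    const BigStruct = extern struct { a: u64, b: u64, c: u64, d: u64 };

    // What the source declares:
    fn makeBigDeclared() BigStruct {
        return .{ .a = 1, .b = 2, .c = 3, .d = 4 };
    }

    // Roughly what the ABI sees when sret applies:
    fn makeBigLowered(ret: *BigStruct) void {
        ret.* = .{ .a = 1, .b = 2, .c = 3, .d = 4 };
    }

    test "sret illustration" {
        var out: BigStruct = undefined;
        makeBigLowered(&out);
        try std.testing.expectEqual(makeBigDeclared(), out);
    }
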
+ try o.di_type_map.put(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty)); + return fn_di_ty; }, .ComptimeInt => unreachable, .ComptimeFloat => unreachable, @@ -1399,1387 +1612,1282 @@ pub const DeclGen = struct { .BoundFn => @panic("TODO remove BoundFn from the language"), - .Frame => @panic("TODO implement llvmType for Frame types"), - .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"), + .Frame => @panic("TODO implement lowerDebugType for Frame types"), + .AnyFrame => @panic("TODO implement lowerDebugType for AnyFrame types"), } } - fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { - if (tv.val.isUndef()) { - const llvm_type = try dg.llvmType(tv.ty); - return llvm_type.getUndef(); + fn namespaceToDebugScope(o: *Object, namespace: *const Module.Namespace) !*llvm.DIScope { + if (namespace.parent == null) { + const di_file = try o.getDIFile(o.gpa, namespace.file_scope); + return di_file.toScope(); } + const di_type = try o.lowerDebugType(namespace.ty, .fwd); + return di_type.toScope(); + } - switch (tv.ty.zigTypeTag()) { - .Bool => { - const llvm_type = try dg.llvmType(tv.ty); - return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); - }, - // TODO this duplicates code with Pointer but they should share the handling - // of the tv.val.tag() and then Int should do extra constPtrToInt on top - .Int => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space); - const target = dg.module.getTarget(); - const int_info = tv.ty.intInfo(target); - assert(int_info.bits != 0); - const llvm_type = dg.context.intType(int_info.bits); + /// This is to be used instead of void for debug info types, to avoid tripping + /// Assertion `!isa(Scope) && "shouldn't make a namespace scope for a type"' + /// when targeting CodeView (Windows). 
+ fn makeEmptyNamespaceDIType(o: *Object, decl: *const Module.Decl) !*llvm.DIType { + const fields: [0]*llvm.DIType = .{}; + return o.di_builder.?.createStructType( + try o.namespaceToDebugScope(decl.src_namespace), + decl.name, // TODO use fully qualified name + try o.getDIFile(o.gpa, decl.src_namespace.file_scope), + decl.src_line + 1, + 0, // size in bits + 0, // align in bits + 0, // flags + null, // derived from + undefined, // TODO should be able to pass &fields, + fields.len, + 0, // run time lang + null, // vtable holder + "", // unique id + ); + } +}; - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - }, - .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = tv.enumToInt(&int_buffer); +pub const DeclGen = struct { + context: *const llvm.Context, + object: *Object, + module: *Module, + decl: *Module.Decl, + gpa: Allocator, + err_msg: ?*Module.ErrorMsg, - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space); + fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { + @setCold(true); + assert(self.err_msg == null); + const src_loc = @as(LazySrcLoc, .{ .node_offset = 0 }).toSrcLoc(self.decl); + self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args); + return error.CodegenFail; + } - const target = dg.module.getTarget(); - const int_info = tv.ty.intInfo(target); - const llvm_type = dg.context.intType(int_info.bits); + fn llvmModule(self: *DeclGen) *const llvm.Module { + return self.object.llvm_module; + } - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .Float => { - const llvm_ty = try dg.llvmType(tv.ty); - const target = dg.module.getTarget(); - switch (tv.ty.floatBits(target)) { - 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)), - 80 => { - const float = tv.val.toFloat(f80); - const repr = std.math.break_f80(float); - const llvm_i80 = dg.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128)); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. 
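
That byte-order comment is the subtle part: the two-limb buffer must hold the
lower half of the f128 first regardless of host endianness. A self-contained
check (hypothetical test):

    const std = @import("std");
    const native_endian = @import("builtin").target.cpu.arch.endian();

    test "f128 as two u64 limbs, low half first" {
        // 1.0 as IEEE binary128 keeps its sign/exponent in the high limb.
        var buf = @bitCast([2]u64, @as(f128, 1.0));
        if (native_endian == .Big) {
            std.mem.swap(u64, &buf[0], &buf[1]);
        }
        try std.testing.expectEqual(@as(u64, 0), buf[0]);
        try std.testing.expectEqual(@as(u64, 0x3FFF_0000_0000_0000), buf[1]);
    }
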
- if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, + fn genDecl(dg: *DeclGen) !void { + const decl = dg.decl; + assert(decl.has_tv); + + log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val }); + + if (decl.val.castTag(.function)) |func_payload| { + _ = func_payload; + @panic("TODO llvm backend genDecl function pointer"); + } else if (decl.val.castTag(.extern_fn)) |extern_fn| { + _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); + } else { + const target = dg.module.getTarget(); + var global = try dg.resolveGlobalDecl(decl); + global.setAlignment(decl.getAlignment(target)); + assert(decl.has_tv); + const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { + const variable = payload.data; + break :init_val variable.init; + } else init_val: { + global.setGlobalConstant(.True); + break :init_val decl.val; + }; + if (init_val.tag() != .unreachable_value) { + const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val }); + if (global.globalGetValueType() == llvm_init.typeOf()) { + global.setInitializer(llvm_init); + } else { + // LLVM does not allow us to change the type of globals. So we must + // create a new global with the correct type, copy all its attributes, + // and then update all references to point to the new global, + // delete the original, and rename the new one to the old one's name. + // This is necessary because LLVM does not support const bitcasting + // a struct with padding bytes, which is needed to lower a const union value + // to LLVM, when a field other than the most-aligned is active. Instead, + // we must lower to an unnamed struct, and pointer cast at usage sites + // of the global. Such an unnamed struct is the cause of the global type + // mismatch, because we don't have the LLVM type until the *value* is created, + // whereas the global needs to be created based on the type alone, because + // lowering the value may reference the global as a pointer. + const new_global = dg.object.llvm_module.addGlobalInAddressSpace( + llvm_init.typeOf(), + "", + dg.llvmAddressSpace(decl.@"addrspace"), + ); + new_global.setLinkage(global.getLinkage()); + new_global.setUnnamedAddr(global.getUnnamedAddress()); + new_global.setAlignment(global.getAlignment()); + new_global.setInitializer(llvm_init); + // replaceAllUsesWith requires the type to be unchanged. So we bitcast + // the new global to the old type and use that as the thing to replace + // old uses. 
+ const new_global_ptr = new_global.constBitCast(global.typeOf()); + global.replaceAllUsesWith(new_global_ptr); + dg.object.decl_map.putAssumeCapacity(decl, new_global); + new_global.takeName(global); + global.deleteGlobal(); + global = new_global; } - }, - .Pointer => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl = tv.val.castTag(.variable).?.data.owner_decl; - decl.markAlive(); - const val = try dg.resolveGlobalDecl(decl); - const llvm_var_type = try dg.llvmType(tv.ty); - const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); - const llvm_type = llvm_var_type.pointerType(llvm_addrspace); - return val.constBitCast(llvm_type); - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), - .val = slice.ptr, - }), - try dg.genTypedValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .int_u64, .one, .int_big_positive => { - const llvm_usize = try dg.llvmType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False); - return llvm_int.constIntToPtr(try dg.llvmType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.childType()); - }, - .null_value, .zero => { - const llvm_type = try dg.llvmType(tv.ty); - return llvm_type.constNull(); - }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }), - }, - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), - .True, // don't null terminate. bytes has the sentinel, if any. 
- ); - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.elemType(); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); - const llvm_elems = try gpa.alloc(*const llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len]) |elem_val, i| { - llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const sentinel = tv.ty.sentinel(); - const len = @intCast(usize, tv.ty.arrayLen()); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); - - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); - } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } + } - if (sentinel) |sent| { - llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } + if (dg.object.di_builder) |dib| { + const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .empty_array_sentinel => { - const elem_ty = tv.ty.elemType(); - const sent_val = tv.ty.sentinel().?; - const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*const llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } - }, - else => unreachable, - }, - .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = tv.ty.optionalChild(&buf); - const llvm_i1 = dg.context.intType(1); - const is_pl = !tv.val.isNull(); - const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return non_null_bit; - } - if (tv.ty.isPtrLikeOptional()) { - if (tv.val.castTag(.opt_payload)) |payload| { - return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data }); - } else if (is_pl) { - return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val }); - } else { - const llvm_ty = try dg.llvmType(tv.ty); - return llvm_ty.constNull(); - } - } - assert(payload_ty.zigTypeTag() != .Fn); - const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ - .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), - }), - non_null_bit, - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - 
.Fn => { - const fn_decl = switch (tv.val.tag()) { - .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, - .function => tv.val.castTag(.function).?.data.owner_decl, - else => unreachable, - }; - fn_decl.markAlive(); - return dg.resolveLlvmFunction(fn_decl); - }, - .ErrorSet => { - const llvm_ty = try dg.llvmType(tv.ty); - switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return llvm_ty.constNull(); - }, - } - }, - .ErrorUnion => { - const error_type = tv.ty.errorUnionSet(); - const payload_type = tv.ty.errorUnionPayload(); - const is_pl = tv.val.errorUnionIsPayload(); + const line_number = decl.src_line + 1; + const is_internal_linkage = !dg.module.decl_exports.contains(decl); + const di_global = dib.createGlobalVariable( + di_file.toScope(), + decl.name, + global.getValueName(), + di_file, + line_number, + try dg.object.lowerDebugType(decl.ty, .full), + is_internal_linkage, + ); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { - // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else Value.initTag(.zero); - return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); - } + try dg.object.di_map.put(dg.gpa, dg.decl, di_global.toNode()); + } + } + } - const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ - .ty = error_type, - .val = if (is_pl) Value.initTag(.zero) else tv.val, - }), - try dg.genTypedValue(.{ - .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .Struct => { - const llvm_struct_ty = try dg.llvmType(tv.ty); - const field_vals = tv.val.castTag(.aggregate).?.data; - const gpa = dg.gpa; - const target = dg.module.getTarget(); + /// If the llvm function does not exist, create it. + /// Note that this can be called before the function's semantic analysis has + /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
+ fn resolveLlvmFunction(dg: *DeclGen, decl: *Module.Decl) !*const llvm.Value { + const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); + if (gop.found_existing) return gop.value_ptr.*; - if (tv.ty.isTupleOrAnonStruct()) { - const tuple = tv.ty.tupleFields(); - var llvm_fields: std.ArrayListUnmanaged(*const llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + assert(decl.has_tv); + const zig_fn_type = decl.ty; + const fn_info = zig_fn_type.fnInfo(); + const target = dg.module.getTarget(); + const sret = firstParamSRet(fn_info, target); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const fn_type = try dg.llvmType(zig_fn_type); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + const fqn = try decl.getFullyQualifiedName(dg.gpa); + defer dg.gpa.free(fqn); - for (tuple.types) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); + const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); + gop.value_ptr.* = llvm_fn; - const field_align = field_ty.abiAlignment(target); - big_align = @maximum(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const is_extern = decl.isExtern(); + if (!is_extern) { + llvm_fn.setLinkage(.Internal); + llvm_fn.setUnnamedAddr(.True); + } else if (dg.module.getTarget().isWasm()) { + dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); + if (decl.getExternFn().?.lib_name) |lib_name| { + const module_name = std.mem.sliceTo(lib_name, 0); + if (!std.mem.eql(u8, module_name, "c")) { + dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name); + } + } + } - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } + if (sret) { + dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 + dg.addArgAttr(llvm_fn, 0, "noalias"); - const field_llvm_val = try dg.genTypedValue(.{ - .ty = field_ty, - .val = field_vals[i], - }); + const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type); + llvm_fn.addSretAttr(0, raw_llvm_ret_ty); + } - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); + // Set parameter attributes. 
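+ // When `sret` is true, parameter index 0 is the sret pointer, so
+ // runtime parameter attributes start at index `@boolToInt(sret)`.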
+ var llvm_param_i: c_uint = @boolToInt(sret); + for (fn_info.param_types) |param_ty| { + if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; - offset += field_ty.abiSize(target); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } + if (isByRef(param_ty)) { + dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull"); + // TODO readonly, noalias, align + } + llvm_param_i += 1; + } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - } + // TODO: more attributes. see codegen.cpp `make_fn_llvm_value`. + if (fn_info.cc == .Naked) { + dg.addFnAttr(llvm_fn, "naked"); + } else { + llvm_fn.setFunctionCallConv(toLlvmCallConv(fn_info.cc, target)); + } - const struct_obj = tv.ty.castTag(.@"struct").?.data; + if (fn_info.alignment != 0) { + llvm_fn.setAlignment(fn_info.alignment); + } - if (struct_obj.layout == .Packed) { - const big_bits = struct_obj.packedIntegerBits(target); - const int_llvm_ty = dg.context.intType(big_bits); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *const llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (field_vals) |field_val, i| { - const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + // Function attributes that are independent of analysis results of the function body. + dg.addCommonFnAttributes(llvm_fn); - const non_int_val = try dg.genTypedValue(.{ - .ty = field.ty, - .val = field_val, - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. 
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } + if (fn_info.return_type.isNoReturn()) { + dg.addFnAttr(llvm_fn, "noreturn"); + } - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + return llvm_fn; + } - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + fn addCommonFnAttributes(dg: *DeclGen, llvm_fn: *const llvm.Value) void { + if (!dg.module.comp.bin_file.options.red_zone) { + dg.addFnAttr(llvm_fn, "noredzone"); + } + if (dg.module.comp.bin_file.options.omit_frame_pointer) { + dg.addFnAttrString(llvm_fn, "frame-pointer", "none"); + } else { + dg.addFnAttrString(llvm_fn, "frame-pointer", "all"); + } + dg.addFnAttr(llvm_fn, "nounwind"); + if (dg.module.comp.unwind_tables) { + dg.addFnAttr(llvm_fn, "uwtable"); + } + if (dg.module.comp.bin_file.options.skip_linker_dependencies) { + // The intent here is for compiler-rt and libc functions to not generate + // infinite recursion. For example, if we are compiling the memcpy function, + // and llvm detects that the body is equivalent to memcpy, it may replace the + // body of memcpy with a call to memcpy, which would then cause a stack + // overflow instead of performing memcpy. + dg.addFnAttr(llvm_fn, "nobuiltin"); + } + if (dg.module.comp.bin_file.options.optimize_mode == .ReleaseSmall) { + dg.addFnAttr(llvm_fn, "minsize"); + dg.addFnAttr(llvm_fn, "optsize"); + } + if (dg.module.comp.bin_file.options.tsan) { + dg.addFnAttr(llvm_fn, "sanitize_thread"); + } + // TODO add target-cpu and target-features fn attributes + } - for (struct_obj.fields.values()) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; + fn resolveGlobalDecl(dg: *DeclGen, decl: *Module.Decl) Error!*const llvm.Value { + const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl); + if (gop.found_existing) return gop.value_ptr.*; + errdefer assert(dg.object.decl_map.remove(decl)); - const field_align = field.normalAlignment(target); - big_align = @maximum(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const fqn = try decl.getFullyQualifiedName(dg.gpa); + defer dg.gpa.free(fqn); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } + const llvm_type = try dg.llvmType(decl.ty); + const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); + const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace); + gop.value_ptr.* = llvm_global; - const field_llvm_val = try dg.genTypedValue(.{ - .ty = field.ty, - .val = field_vals[i], - }); + // This is needed for declarations created by `@extern`. 
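+ // (For example, a decl produced by `@extern(*i32, .{ .name = "foo" })`
+ // must keep its extern symbol name and external linkage.)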
+ if (decl.isExtern()) {
+ llvm_global.setValueName(decl.name);
+ llvm_global.setUnnamedAddr(.False);
+ llvm_global.setLinkage(.External);
+ if (decl.val.castTag(.variable)) |variable| {
+ const single_threaded = dg.module.comp.bin_file.options.single_threaded;
+ if (variable.data.is_threadlocal and !single_threaded) {
+ llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
+ } else {
+ llvm_global.setThreadLocalMode(.NotThreadLocal);
+ }
+ if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
+ }
+ } else {
+ llvm_global.setLinkage(.Internal);
+ llvm_global.setUnnamedAddr(.True);
+ }

+ return llvm_global;
+ }

+ fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint {
+ const target = self.module.getTarget();
+ return switch (target.cpu.arch) {
+ .i386, .x86_64 => switch (address_space) {
+ .generic => llvm.address_space.default,
+ .gs => llvm.address_space.x86.gs,
+ .fs => llvm.address_space.x86.fs,
+ .ss => llvm.address_space.x86.ss,
+ else => unreachable,
+ },
+ .nvptx, .nvptx64 => switch (address_space) {
+ .generic => llvm.address_space.default,
+ .global => llvm.address_space.nvptx.global,
+ .constant => llvm.address_space.nvptx.constant,
+ .param => llvm.address_space.nvptx.param,
+ .shared => llvm.address_space.nvptx.shared,
+ .local => llvm.address_space.nvptx.local,
+ else => unreachable,
+ },
+ else => switch (address_space) {
+ .generic => llvm.address_space.default,
+ else => unreachable,
+ },
+ };
+ }

+ fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
+ // Once `llvmType` succeeds, successive calls to it with the same Zig type
+ // are guaranteed to succeed. So if a call to `llvmType` fails here it means
+ // it is the first time lowering the type, which means the value can't possibly
+ // have that type. 
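+ // Hence the `catch return true` below: a value cannot have a type that
+ // has never been successfully lowered.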
+ const llvm_ty = dg.llvmType(ty) catch return true; + return val.typeOf() != llvm_ty; + } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } + fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { + const gpa = dg.gpa; + const target = dg.module.getTarget(); + switch (t.zigTypeTag()) { + .Void, .NoReturn => return dg.context.voidType(), + .Int => { + const info = t.intInfo(target); + assert(info.bits != 0); + return dg.context.intType(info.bits); }, - .Union => { - const llvm_union_ty = try dg.llvmType(tv.ty); - const tag_and_val = tv.val.castTag(.@"union").?.data; + .Enum => { + var buffer: Type.Payload.Bits = undefined; + const int_ty = t.intTagType(&buffer); + const bit_count = int_ty.intInfo(target).bits; + assert(bit_count != 0); + return dg.context.intType(bit_count); + }, + .Float => switch (t.floatBits(target)) { + 16 => return dg.context.halfType(), + 32 => return dg.context.floatType(), + 64 => return dg.context.doubleType(), + 80 => return if (backendSupportsF80(target)) dg.context.x86FP80Type() else dg.context.intType(80), + 128 => return dg.context.fp128Type(), + else => unreachable, + }, + .Bool => return dg.context.intType(1), + .Pointer => { + if (t.isSlice()) { + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const ptr_type = t.slicePtrFieldType(&buf); - const target = dg.module.getTarget(); - const layout = tv.ty.unionGetLayout(target); - - if (layout.payload_size == 0) { - return genTypedValue(dg, .{ - .ty = tv.ty.unionTagType().?, - .val = tag_and_val.tag, - }); - } - const union_obj = tv.ty.cast(Type.Payload.Union).?.data; - const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag).?; - assert(union_obj.haveFieldTypes()); - const field_ty = union_obj.fields.values()[field_index].ty; - const payload = p: { - if (!field_ty.hasRuntimeBitsIgnoreComptime()) { - const padding_len = @intCast(c_uint, layout.payload_size); - break :p dg.context.intType(8).arrayType(padding_len).getUndef(); - } - const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); - const field_size = field_ty.abiSize(target); - if (field_size == layout.payload_size) { - break :p field; - } - const padding_len = @intCast(c_uint, layout.payload_size - field_size); - const fields: [2]*const llvm.Value = .{ - field, dg.context.intType(8).arrayType(padding_len).getUndef(), + const fields: [2]*const llvm.Type = .{ + try dg.llvmType(ptr_type), + try dg.llvmType(Type.usize), }; - break :p dg.context.constStruct(&fields, fields.len, .True); - }; - - // In this case we must make an unnamed struct because LLVM does - // not support bitcasting our payload struct to the true union payload type. - // Instead we use an unnamed struct and every reference to the global - // must pointer cast to the expected type before accessing the union. 
- const need_unnamed = layout.most_aligned_field != field_index; - - if (layout.tag_size == 0) { - const fields: [1]*const llvm.Value = .{payload}; - if (need_unnamed) { - return dg.context.constStruct(&fields, fields.len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, fields.len); - } - } - const llvm_tag_value = try genTypedValue(dg, .{ - .ty = tv.ty.unionTagType().?, - .val = tag_and_val.tag, - }); - var fields: [3]*const llvm.Value = undefined; - var fields_len: c_uint = 2; - if (layout.tag_align >= layout.payload_align) { - fields = .{ llvm_tag_value, payload, undefined }; - } else { - fields = .{ payload, llvm_tag_value, undefined }; - } - if (layout.padding != 0) { - fields[2] = dg.context.intType(8).arrayType(layout.padding).getUndef(); - fields_len = 3; + return dg.context.structType(&fields, fields.len, .False); } - if (need_unnamed) { - return dg.context.constStruct(&fields, fields_len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, fields_len); + const ptr_info = t.ptrInfo().data; + const llvm_addrspace = dg.llvmAddressSpace(ptr_info.@"addrspace"); + if (ptr_info.host_size != 0) { + return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace); } + const elem_ty = ptr_info.pointee_type; + const lower_elem_ty = switch (elem_ty.zigTypeTag()) { + .Opaque, .Fn => true, + .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(), + else => elem_ty.hasRuntimeBitsIgnoreComptime(), + }; + const llvm_elem_ty = if (lower_elem_ty) + try dg.llvmType(elem_ty) + else + dg.context.intType(8); + return llvm_elem_ty.pointerType(llvm_addrspace); }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); + .Opaque => switch (t.tag()) { + .@"opaque" => { + const gop = try dg.object.type_map.getOrPut(gpa, t); + if (gop.found_existing) return gop.value_ptr.*; - const elem_ty = tv.ty.elemType(); - const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem, i| { - var byte_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = bytes[i], - }; + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - elem.* = try dg.genTypedValue(.{ - .ty = elem_ty, - .val = Value.initPayload(&byte_payload.base), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .aggregate => { - // Note, sentinel is not stored even if the type has a sentinel. - // The value includes the sentinel in those cases. - const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); - assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.elemType(); - const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem, i| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .repeated => { - // Note, sentinel is not stored even if the type has a sentinel. 
- const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const len = @intCast(usize, tv.ty.arrayLen()); - const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); + const opaque_obj = t.castTag(.@"opaque").?.data; + const name = try opaque_obj.getFullyQualifiedName(gpa); + defer gpa.free(name); + + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return llvm_struct_ty; }, + .anyopaque => return dg.context.intType(8), else => unreachable, }, + .Array => { + const elem_ty = t.childType(); + assert(elem_ty.onePossibleValue() == null); + const elem_llvm_ty = try dg.llvmType(elem_ty); + const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); + return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); + }, + .Vector => { + const elem_type = try dg.llvmType(t.childType()); + return elem_type.vectorType(t.vectorLen()); + }, + .Optional => { + var buf: Type.Payload.ElemType = undefined; + const child_ty = t.optionalChild(&buf); + if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + return dg.context.intType(1); + } + const payload_llvm_ty = try dg.llvmType(child_ty); + if (t.isPtrLikeOptional()) { + return payload_llvm_ty; + } - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .BoundFn => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty}), - } - } + const fields: [2]*const llvm.Type = .{ + payload_llvm_ty, dg.context.intType(1), + }; + return dg.context.structType(&fields, fields.len, .False); + }, + .ErrorUnion => { + const error_type = t.errorUnionSet(); + const payload_type = t.errorUnionPayload(); + const llvm_error_type = try dg.llvmType(error_type); + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + return llvm_error_type; + } + const llvm_payload_type = try dg.llvmType(payload_type); - fn lowerDebugType(dg: *DeclGen, ty: Type) Allocator.Error!*llvm.DIType { - const gpa = dg.gpa; - // Be careful not to reference this `gop` variable after any recursive calls - // to `lowerDebugType`. - const gop = try dg.object.di_type_map.getOrPut(gpa, ty); - if (gop.found_existing) return gop.value_ptr.*; - errdefer assert(dg.object.di_type_map.remove(ty)); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try ty.copy(dg.object.type_map_arena.allocator()); - const target = dg.module.getTarget(); - const dib = dg.object.di_builder.?; - switch (ty.zigTypeTag()) { - .Void, .NoReturn => { - gop.value_ptr.* = dib.createBasicType("void", 0, DW.ATE.signed); - return gop.value_ptr.*; + const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; + return dg.context.structType(&fields, fields.len, .False); }, - .Int => { - const info = ty.intInfo(target); - assert(info.bits != 0); - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const dwarf_encoding: c_uint = switch (info.signedness) { - .signed => DW.ATE.signed, - .unsigned => DW.ATE.unsigned, - }; - gop.value_ptr.* = dib.createBasicType(name, info.bits, dwarf_encoding); - return gop.value_ptr.*; + .ErrorSet => { + return dg.context.intType(16); }, - .Enum => { - const owner_decl = ty.getOwnerDecl(); + .Struct => { + const gop = try dg.object.type_map.getOrPut(gpa, t); + if (gop.found_existing) return gop.value_ptr.*; - if (!ty.hasRuntimeBitsIgnoreComptime()) { - const enum_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, enum_di_ty); - return enum_di_ty; - } + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const field_names = ty.enumFields().keys(); + if (t.isTupleOrAnonStruct()) { + const tuple = t.tupleFields(); + const llvm_struct_ty = dg.context.structCreateNamed(""); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len); - defer gpa.free(enumerators); + var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{}; + defer llvm_field_types.deinit(gpa); - var buf_field_index: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = undefined, - }; - const field_index_val = Value.initPayload(&buf_field_index.base); + try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); - for (field_names) |field_name, i| { - const field_name_z = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_z); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - buf_field_index.data = @intCast(u32, i); - var buf_u64: Value.Payload.U64 = undefined; - const field_int_val = field_index_val.enumToInt(ty, &buf_u64); - // See https://github.com/ziglang/zig/issues/645 - const field_int = field_int_val.toSignedInt(); - enumerators[i] = dib.createEnumerator(field_name_z, field_int); + for (tuple.types) |field_ty, i| { + const field_val = tuple.values[i]; + if (field_val.tag() != .unreachable_value) continue; + + const field_align = field_ty.abiAlignment(target); + big_align = @maximum(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + const field_llvm_ty = try dg.llvmType(field_ty); + try llvm_field_types.append(gpa, field_llvm_ty); + + offset += field_ty.abiSize(target); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const 
padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + } + + llvm_struct_ty.structSetBody( + llvm_field_types.items.ptr, + @intCast(c_uint, llvm_field_types.items.len), + .False, + ); + + return llvm_struct_ty; } - const di_file = try dg.object.getDIFile(gpa, owner_decl.src_namespace.file_scope); - const di_scope = try dg.namespaceToDebugScope(owner_decl.src_namespace); + const struct_obj = t.castTag(.@"struct").?.data; - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - var buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&buffer); + if (struct_obj.layout == .Packed) { + var buf: Type.Payload.Bits = undefined; + const int_ty = struct_obj.packedIntegerType(target, &buf); + const int_llvm_ty = try dg.llvmType(int_ty); + gop.value_ptr.* = int_llvm_ty; + return int_llvm_ty; + } - const enum_di_ty = dib.createEnumerationType( - di_scope, - name, - di_file, - owner_decl.src_node + 1, - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, - enumerators.ptr, - @intCast(c_int, enumerators.len), - try lowerDebugType(dg, int_ty), - "", - ); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, enum_di_ty); - return enum_di_ty; - }, - .Float => { - const bits = ty.floatBits(target); - const name = try ty.nameAlloc(gpa); + const name = try struct_obj.getFullyQualifiedName(gpa); defer gpa.free(name); - gop.value_ptr.* = dib.createBasicType(name, bits, DW.ATE.float); - return gop.value_ptr.*; - }, - .Bool => { - gop.value_ptr.* = dib.createBasicType("bool", 1, DW.ATE.boolean); - return gop.value_ptr.*; - }, - .Pointer => { - // Normalize everything that the debug info does not represent. - const ptr_info = ty.ptrInfo().data; - if (ptr_info.sentinel != null or - ptr_info.@"addrspace" != .generic or - ptr_info.bit_offset != 0 or - ptr_info.host_size != 0 or - ptr_info.@"allowzero" or - !ptr_info.mutable or - ptr_info.@"volatile" or - ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) - { - var payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = ptr_info.pointee_type, - .sentinel = null, - .@"align" = ptr_info.@"align", - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = switch (ptr_info.size) { - .Many, .C, .One => .One, - .Slice => .Slice, - }, - }, - }; - if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) { - payload.data.pointee_type = Type.anyopaque; - } - const bland_ptr_ty = Type.initPayload(&payload.base); - const ptr_di_ty = try dg.lowerDebugType(bland_ptr_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, ptr_di_ty); - return ptr_di_ty; - } + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - if (ty.isSlice()) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); - const len_ty = Type.usize; + assert(struct_obj.haveFieldTypes()); - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const di_file: ?*llvm.DIFile = null; - const line = 0; - const compile_unit_scope = dg.object.di_compile_unit.?.toScope(); - const fwd_decl = dib.createReplaceableCompositeType( - DW.TAG.structure_type, - name.ptr, - compile_unit_scope, - di_file, - line, - ); - gop.value_ptr.* = fwd_decl; + var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{}; + defer llvm_field_types.deinit(gpa); - const ptr_size = ptr_ty.abiSize(target); - const ptr_align = ptr_ty.abiAlignment(target); - const len_size = len_ty.abiSize(target); - const len_align = len_ty.abiAlignment(target); + try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count()); - var offset: u64 = 0; - offset += ptr_size; - offset = std.mem.alignForwardGeneric(u64, offset, len_align); - const len_offset = offset; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - const fields: [2]*llvm.DIType = .{ - dib.createMemberType( - fwd_decl.toScope(), - "ptr", - di_file, - line, - ptr_size * 8, // size in bits - ptr_align * 8, // align in bits - 0, // offset in bits - 0, // flags - try dg.lowerDebugType(ptr_ty), - ), - dib.createMemberType( - fwd_decl.toScope(), - "len", - di_file, - line, - len_size * 8, // size in bits - len_align * 8, // align in bits - len_offset * 8, // offset in bits - 0, // flags - try dg.lowerDebugType(len_ty), - ), - }; + for (struct_obj.fields.values()) |field| { + if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; - const replacement_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - di_file, - line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits - 0, // flags - null, // derived from - &fields, - fields.len, - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, replacement_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, replacement_di_ty); - return replacement_di_ty; + const field_align = field.normalAlignment(target); + big_align = @maximum(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + const field_llvm_ty = try dg.llvmType(field.ty); + try llvm_field_types.append(gpa, field_llvm_ty); + + offset += field.ty.abiSize(target); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } } - const elem_di_ty = try lowerDebugType(dg, ptr_info.pointee_type); - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const ptr_di_ty = dib.createPointerType( - elem_di_ty, - target.cpu.arch.ptrBitWidth(), - ty.ptrAlignment(target) * 8, - name, + llvm_struct_ty.structSetBody( + llvm_field_types.items.ptr, + @intCast(c_uint, llvm_field_types.items.len), + .False, ); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, ptr_di_ty); - return ptr_di_ty; + + return llvm_struct_ty; }, - .Opaque => { - if (ty.tag() == .anyopaque) { - gop.value_ptr.* = dib.createBasicType("anyopaque", 0, DW.ATE.signed); - return gop.value_ptr.*; + .Union => { + const gop = try dg.object.type_map.getOrPut(gpa, t); + if (gop.found_existing) return gop.value_ptr.*; + + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + + const layout = t.unionGetLayout(target); + const union_obj = t.cast(Type.Payload.Union).?.data; + + if (layout.payload_size == 0) { + const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + gop.value_ptr.* = enum_tag_llvm_ty; + return enum_tag_llvm_ty; } - const name = try ty.nameAlloc(gpa); + + const name = try union_obj.getFullyQualifiedName(gpa); defer gpa.free(name); - const owner_decl = ty.getOwnerDecl(); - const opaque_di_ty = dib.createForwardDeclType( - DW.TAG.structure_type, - name, - try dg.namespaceToDebugScope(owner_decl.src_namespace), - try dg.object.getDIFile(gpa, owner_decl.src_namespace.file_scope), - owner_decl.src_node + 1, - ); - // The recursive call to `lowerDebugType` va `namespaceToDebugScope` - // means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, opaque_di_ty); - return opaque_di_ty; - }, - .Array => { - const array_di_ty = dib.createArrayType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, - try lowerDebugType(dg, ty.childType()), - @intCast(c_int, ty.arrayLen()), - ); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, array_di_ty); - return array_di_ty; - }, - .Vector => { - const vector_di_ty = dib.createVectorType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, - try lowerDebugType(dg, ty.childType()), - ty.vectorLen(), - ); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, vector_di_ty); - return vector_di_ty; + + const llvm_union_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls + + const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; + const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); + + const llvm_payload_ty = t: { + if (layout.most_aligned_field_size == layout.payload_size) { + break :t llvm_aligned_field_ty; + } + const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); + const fields: [2]*const llvm.Type = .{ + llvm_aligned_field_ty, + dg.context.intType(8).arrayType(padding_len), + }; + break :t dg.context.structType(&fields, fields.len, .True); + }; + + if (layout.tag_size == 0) { + var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty}; + llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); + return llvm_union_ty; + } + const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + + // Put the tag before or after the payload depending on which one's + // alignment is greater. + var llvm_fields: [3]*const llvm.Type = undefined; + var llvm_fields_len: c_uint = 2; + + if (layout.tag_align >= layout.payload_align) { + llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; + } else { + llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; + } + + // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. + if (layout.padding != 0) { + llvm_fields[2] = dg.context.intType(8).arrayType(layout.padding); + llvm_fields_len = 3; + } + + llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); + return llvm_union_ty; }, - .Optional => { - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { - gop.value_ptr.* = dib.createBasicType(name, 1, DW.ATE.boolean); - return gop.value_ptr.*; + .Fn => { + const fn_info = t.fnInfo(); + const sret = firstParamSRet(fn_info, target); + const return_type = fn_info.return_type; + const llvm_sret_ty = if (return_type.hasRuntimeBitsIgnoreComptime()) + try dg.llvmType(return_type) + else + dg.context.voidType(); + const llvm_ret_ty = if (sret) dg.context.voidType() else llvm_sret_ty; + + var llvm_params = std.ArrayList(*const llvm.Type).init(dg.gpa); + defer llvm_params.deinit(); + + if (sret) { + try llvm_params.append(llvm_sret_ty.pointerType(0)); } - if (ty.isPtrLikeOptional()) { - const ptr_di_ty = try dg.lowerDebugType(child_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, ptr_di_ty); - return ptr_di_ty; + + for (fn_info.param_types) |param_ty| { + if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + + const raw_llvm_ty = try dg.llvmType(param_ty); + const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0); + try llvm_params.append(actual_llvm_ty); } - const di_file: ?*llvm.DIFile = null; - const line = 0; - const compile_unit_scope = dg.object.di_compile_unit.?.toScope(); - const fwd_decl = dib.createReplaceableCompositeType( - DW.TAG.structure_type, - name.ptr, - compile_unit_scope, - di_file, - line, + return llvm.functionType( + llvm_ret_ty, + llvm_params.items.ptr, + @intCast(c_uint, llvm_params.items.len), + llvm.Bool.fromBool(fn_info.is_var_args), ); - gop.value_ptr.* = fwd_decl; + }, + .ComptimeInt => unreachable, + .ComptimeFloat => unreachable, + .Type => unreachable, + .Undefined => unreachable, + .Null => unreachable, + .EnumLiteral => unreachable, - const non_null_ty = Type.bool; - const payload_size = child_ty.abiSize(target); - const payload_align = child_ty.abiAlignment(target); - const non_null_size = non_null_ty.abiSize(target); - const non_null_align = non_null_ty.abiAlignment(target); + .BoundFn => @panic("TODO remove BoundFn from the language"), - var offset: u64 = 0; - offset += payload_size; - offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); - const non_null_offset = offset; + .Frame => @panic("TODO implement llvmType for Frame types"), + .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"), + } + } - const fields: [2]*llvm.DIType = .{ - dib.createMemberType( - fwd_decl.toScope(), - "data", - di_file, - line, - payload_size * 8, // size in bits - payload_align * 8, // align in bits - 0, // offset in bits - 0, // flags - try dg.lowerDebugType(child_ty), - ), - dib.createMemberType( - fwd_decl.toScope(), - "some", - di_file, - line, - non_null_size * 8, // size in bits - non_null_align * 8, // align in bits - non_null_offset * 8, // offset in bits - 0, // flags - try dg.lowerDebugType(non_null_ty), - ), - }; + fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { + if (tv.val.isUndef()) { + const llvm_type = try dg.llvmType(tv.ty); + return llvm_type.getUndef(); + } - const replacement_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - di_file, - line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits - 0, // flags - null, // derived from - &fields, - fields.len, - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, replacement_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, replacement_di_ty); - return replacement_di_ty; + switch (tv.ty.zigTypeTag()) { + .Bool => { + const llvm_type = try dg.llvmType(tv.ty); + return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, - .ErrorUnion => { - const err_set_ty = ty.errorUnionSet(); - const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_set_di_ty = try dg.lowerDebugType(err_set_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, err_set_di_ty); - return err_set_di_ty; - } - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const di_file: ?*llvm.DIFile = null; - const line = 0; - const compile_unit_scope = dg.object.di_compile_unit.?.toScope(); - const fwd_decl = dib.createReplaceableCompositeType( - DW.TAG.structure_type, - name.ptr, - compile_unit_scope, - di_file, - line, - ); - gop.value_ptr.* = fwd_decl; + // TODO this duplicates code with Pointer but they should share the handling + // of the tv.val.tag() and then Int should do extra constPtrToInt on top + .Int => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + else => { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = tv.val.toBigInt(&bigint_space); + const target = dg.module.getTarget(); + const int_info = tv.ty.intInfo(target); + assert(int_info.bits != 0); + const llvm_type = dg.context.intType(int_info.bits); - const err_set_size = err_set_ty.abiSize(target); - const err_set_align = err_set_ty.abiAlignment(target); - const payload_size = payload_ty.abiSize(target); - const payload_align = payload_ty.abiAlignment(target); + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + }, + .Enum => { + var int_buffer: Value.Payload.U64 = undefined; + const int_val = tv.enumToInt(&int_buffer); - var offset: u64 = 0; - offset += err_set_size; - offset = std.mem.alignForwardGeneric(u64, offset, payload_align); - const payload_offset = offset; + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_space); - const fields: [2]*llvm.DIType = .{ - dib.createMemberType( - fwd_decl.toScope(), - "tag", - di_file, - line, - err_set_size * 8, // size in bits - err_set_align * 8, // align in bits - 0, // offset in bits - 0, // flags - try dg.lowerDebugType(err_set_ty), - ), - dib.createMemberType( - fwd_decl.toScope(), - "value", - di_file, - line, - payload_size * 8, // size in bits - payload_align * 8, // align in bits - payload_offset * 8, // offset in bits - 0, // flags - try dg.lowerDebugType(payload_ty), - ), + const target = dg.module.getTarget(); + const int_info = tv.ty.intInfo(target); + const llvm_type = dg.context.intType(int_info.bits); + + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + .Float => { + const llvm_ty = try dg.llvmType(tv.ty); + const target = dg.module.getTarget(); + switch (tv.ty.floatBits(target)) { + 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)), + 80 => { + const float = tv.val.toFloat(f80); + const repr = std.math.break_f80(float); + const llvm_i80 = dg.context.intType(80); + var x = 
llvm_i80.constInt(repr.exp, .False); + x = x.constShl(llvm_i80.constInt(64, .False)); + x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); + if (backendSupportsF80(target)) { + return x.constBitCast(llvm_ty); + } else { + return x; + } + }, + 128 => { + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128)); + // LLVM seems to require that the lower half of the f128 be placed first + // in the buffer. + if (native_endian == .Big) { + std.mem.swap(u64, &buf[0], &buf[1]); + } + const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + return int.constBitCast(llvm_ty); + }, + else => unreachable, + } + }, + .Pointer => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + .variable => { + const decl = tv.val.castTag(.variable).?.data.owner_decl; + decl.markAlive(); + const val = try dg.resolveGlobalDecl(decl); + const llvm_var_type = try dg.llvmType(tv.ty); + const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); + const llvm_type = llvm_var_type.pointerType(llvm_addrspace); + return val.constBitCast(llvm_type); + }, + .slice => { + const slice = tv.val.castTag(.slice).?.data; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const fields: [2]*const llvm.Value = .{ + try dg.genTypedValue(.{ + .ty = tv.ty.slicePtrFieldType(&buf), + .val = slice.ptr, + }), + try dg.genTypedValue(.{ + .ty = Type.usize, + .val = slice.len, + }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + .int_u64, .one, .int_big_positive => { + const llvm_usize = try dg.llvmType(Type.usize); + const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False); + return llvm_int.constIntToPtr(try dg.llvmType(tv.ty)); + }, + .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { + return dg.lowerParentPtr(tv.val, tv.ty.childType()); + }, + .null_value, .zero => { + const llvm_type = try dg.llvmType(tv.ty); + return llvm_type.constNull(); + }, + else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }), + }, + .Array => switch (tv.val.tag()) { + .bytes => { + const bytes = tv.val.castTag(.bytes).?.data; + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), + .True, // don't null terminate. bytes has the sentinel, if any. 
+ ); + }, + .aggregate => { + const elem_vals = tv.val.castTag(.aggregate).?.data; + const elem_ty = tv.ty.elemType(); + const gpa = dg.gpa; + const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); + const llvm_elems = try gpa.alloc(*const llvm.Value, len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals[0..len]) |elem_val, i| { + llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.llvmType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated => { + const val = tv.val.castTag(.repeated).?.data; + const elem_ty = tv.ty.elemType(); + const sentinel = tv.ty.sentinel(); + const len = @intCast(usize, tv.ty.arrayLen()); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); - const replacement_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - di_file, - line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits - 0, // flags - null, // derived from - &fields, - fields.len, - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, replacement_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, replacement_di_ty); - return replacement_di_ty; + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); + } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } + + if (sentinel) |sent| { + llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } + + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.llvmType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .empty_array_sentinel => { + const elem_ty = tv.ty.elemType(); + const sent_val = tv.ty.sentinel().?; + const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val }); + const llvm_elems: [1]*const llvm.Value = .{sentinel}; + const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); + if (need_unnamed) { + return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); + } else { + const llvm_elem_ty = try dg.llvmType(elem_ty); + return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); + } + }, + else => unreachable, + }, + .Optional => { + var buf: Type.Payload.ElemType = undefined; + const payload_ty = tv.ty.optionalChild(&buf); + const llvm_i1 = dg.context.intType(1); + const is_pl = !tv.val.isNull(); + const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull(); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return non_null_bit; + } + if (tv.ty.isPtrLikeOptional()) { + if (tv.val.castTag(.opt_payload)) |payload| { + return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data }); + } else if (is_pl) { + return dg.genTypedValue(.{ .ty = 
payload_ty, .val = tv.val }); + } else { + const llvm_ty = try dg.llvmType(tv.ty); + return llvm_ty.constNull(); + } + } + assert(payload_ty.zigTypeTag() != .Fn); + const fields: [2]*const llvm.Value = .{ + try dg.genTypedValue(.{ + .ty = payload_ty, + .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), + }), + non_null_bit, + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + .Fn => { + const fn_decl = switch (tv.val.tag()) { + .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, + .function => tv.val.castTag(.function).?.data.owner_decl, + else => unreachable, + }; + fn_decl.markAlive(); + return dg.resolveLlvmFunction(fn_decl); }, .ErrorSet => { - // TODO make this a proper enum with all the error codes in it. - // will need to consider how to take incremental compilation into account. - gop.value_ptr.* = dib.createBasicType("anyerror", 16, DW.ATE.unsigned); - return gop.value_ptr.*; + const llvm_ty = try dg.llvmType(tv.ty); + switch (tv.val.tag()) { + .@"error" => { + const err_name = tv.val.castTag(.@"error").?.data.name; + const kv = try dg.module.getErrorValue(err_name); + return llvm_ty.constInt(kv.value, .False); + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return llvm_ty.constNull(); + }, + } }, - .Struct => { - const compile_unit_scope = dg.object.di_compile_unit.?.toScope(); - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const fwd_decl = dib.createReplaceableCompositeType( - DW.TAG.structure_type, - name.ptr, - compile_unit_scope, - null, // file - 0, // line - ); - gop.value_ptr.* = fwd_decl; + .ErrorUnion => { + const error_type = tv.ty.errorUnionSet(); + const payload_type = tv.ty.errorUnionPayload(); + const is_pl = tv.val.errorUnionIsPayload(); - if (ty.isTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
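+ // An error value of zero means "no error", which is how the payload
+ // case is represented.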
+ const err_val = if (!is_pl) tv.val else Value.initTag(.zero); + return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); + } - var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; - defer di_fields.deinit(gpa); + const fields: [2]*const llvm.Value = .{ + try dg.genTypedValue(.{ + .ty = error_type, + .val = if (is_pl) Value.initTag(.zero) else tv.val, + }), + try dg.genTypedValue(.{ + .ty = payload_type, + .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + .Struct => { + const llvm_struct_ty = try dg.llvmType(tv.ty); + const field_vals = tv.val.castTag(.aggregate).?.data; + const gpa = dg.gpa; + const target = dg.module.getTarget(); - try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); + if (tv.ty.isTupleOrAnonStruct()) { + const tuple = tv.ty.tupleFields(); + var llvm_fields: std.ArrayListUnmanaged(*const llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); + + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); comptime assert(struct_layout_version == 2); var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; for (tuple.types) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value) continue; + if (tuple.values[i].tag() != .unreachable_value) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - const field_size = field_ty.abiSize(target); const field_align = field_ty.abiAlignment(target); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); - offset = field_offset + field_size; - - const field_name = if (ty.castTag(.anon_struct)) |payload| - try gpa.dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(gpa, "{d}", .{i}); - defer gpa.free(field_name); - - try di_fields.append(gpa, dib.createMemberType( - fwd_decl.toScope(), - field_name, - null, // file - 0, // line - field_size * 8, // size in bits - field_align * 8, // align in bits - field_offset * 8, // offset in bits - 0, // flags - try dg.lowerDebugType(field_ty), - )); - } - - const replacement_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - null, // file - 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits - 0, // flags - null, // derived from - di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, replacement_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, replacement_di_ty); - return replacement_di_ty; - } - - const TODO_implement_this = true; // TODO - if (TODO_implement_this or !ty.hasRuntimeBitsIgnoreComptime()) { - const owner_decl = ty.getOwnerDecl(); - const struct_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl); - dib.replaceTemporary(fwd_decl, struct_di_ty); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. 
- try dg.object.di_type_map.put(gpa, ty, struct_di_ty); - return struct_di_ty; - } - @panic("TODO debug info type for struct"); - - //const struct_obj = ty.castTag(.@"struct").?.data; - - //if (struct_obj.layout == .Packed) { - // var buf: Type.Payload.Bits = undefined; - // const int_ty = struct_obj.packedIntegerType(target, &buf); - // const int_llvm_ty = try dg.llvmType(int_ty); - // gop.value_ptr.* = int_llvm_ty; - // return int_llvm_ty; - //} - - //const name = try struct_obj.getFullyQualifiedName(gpa); - //defer gpa.free(name); - - //const llvm_struct_ty = dg.context.structCreateNamed(name); - //gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - - //assert(struct_obj.haveFieldTypes()); - - //var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{}; - //defer llvm_field_types.deinit(gpa); - - //try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count()); - - //comptime assert(struct_layout_version == 2); - //var offset: u64 = 0; - //var big_align: u32 = 0; - - //for (struct_obj.fields.values()) |field| { - // if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; - - // const field_align = field.normalAlignment(target); - // big_align = @maximum(big_align, field_align); - // const prev_offset = offset; - // offset = std.mem.alignForwardGeneric(u64, offset, field_align); + big_align = @maximum(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - // const padding_len = offset - prev_offset; - // if (padding_len > 0) { - // const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // try llvm_field_types.append(gpa, llvm_array_ty); - // } - // const field_llvm_ty = try dg.llvmType(field.ty); - // try llvm_field_types.append(gpa, field_llvm_ty); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
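+ // The undef i8 array only reserves space; it keeps the next field at
+ // the offset just computed so the constant matches the tuple's layout.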
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - // offset += field.ty.abiSize(target); - //} - //{ - // const prev_offset = offset; - // offset = std.mem.alignForwardGeneric(u64, offset, big_align); - // const padding_len = offset - prev_offset; - // if (padding_len > 0) { - // const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // try llvm_field_types.append(gpa, llvm_array_ty); - // } - //} + const field_llvm_val = try dg.genTypedValue(.{ + .ty = field_ty, + .val = field_vals[i], + }); - //llvm_struct_ty.structSetBody( - // llvm_field_types.items.ptr, - // @intCast(c_uint, llvm_field_types.items.len), - // .False, - //); + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val); - //return llvm_struct_ty; - }, - .Union => { - const owner_decl = ty.getOwnerDecl(); + llvm_fields.appendAssumeCapacity(field_llvm_val); - const name = try ty.nameAlloc(gpa); - defer gpa.free(name); - const fwd_decl = dib.createReplaceableCompositeType( - DW.TAG.structure_type, - name.ptr, - dg.object.di_compile_unit.?.toScope(), - null, // file - 0, // line - ); - gop.value_ptr.* = fwd_decl; + offset += field_ty.abiSize(target); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } - const TODO_implement_this = true; // TODO - if (TODO_implement_this or !ty.hasRuntimeBitsIgnoreComptime()) { - const union_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl); - dib.replaceTemporary(fwd_decl, union_di_ty); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. - try dg.object.di_type_map.put(gpa, ty, union_di_ty); - return union_di_ty; + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } } - @panic("TODO debug info type for union"); - //const gop = try dg.object.type_map.getOrPut(gpa, ty); - //if (gop.found_existing) return gop.value_ptr.*; + const struct_obj = tv.ty.castTag(.@"struct").?.data; - //// The Type memory is ephemeral; since we want to store a longer-lived - //// reference, we need to copy it here. 
- //gop.key_ptr.* = try ty.copy(dg.object.type_map_arena.allocator()); + if (struct_obj.layout == .Packed) { + const big_bits = struct_obj.packedIntegerBits(target); + const int_llvm_ty = dg.context.intType(big_bits); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *const llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (field_vals) |field_val, i| { + const field = fields[i]; + if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; - //const layout = ty.unionGetLayout(target); - //const union_obj = ty.cast(Type.Payload.Union).?.data; + const non_int_val = try dg.genTypedValue(.{ + .ty = field.ty, + .val = field_val, + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. + const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; + } - //if (layout.payload_size == 0) { - // const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); - // gop.value_ptr.* = enum_tag_llvm_ty; - // return enum_tag_llvm_ty; - //} + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); - //const name = try union_obj.getFullyQualifiedName(gpa); - //defer gpa.free(name); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - //const llvm_union_ty = dg.context.structCreateNamed(name); - //gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls + for (struct_obj.fields.values()) |field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; - //const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - //const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); + const field_align = field.normalAlignment(target); + big_align = @maximum(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - //const llvm_payload_ty = ty: { - // if (layout.most_aligned_field_size == layout.payload_size) { - // break :ty llvm_aligned_field_ty; - // } - // const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size); - // const fields: [2]*const llvm.Type = .{ - // llvm_aligned_field_ty, - // dg.context.intType(8).arrayType(padding_len), - // }; - // break :ty dg.context.structType(&fields, fields.len, .True); - //}; + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
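+ // Same technique as the tuple case above: explicit undef filler bytes
+ // keep the constant's field offsets in sync with the struct's ABI layout.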
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - //if (layout.tag_size == 0) { - // var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty}; - // llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); - // return llvm_union_ty; - //} - //const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + const field_llvm_val = try dg.genTypedValue(.{ + .ty = field.ty, + .val = field_vals[i], + }); - //// Put the tag before or after the payload depending on which one's - //// alignment is greater. - //var llvm_fields: [3]*const llvm.Type = undefined; - //var llvm_fields_len: c_uint = 2; + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); - //if (layout.tag_align >= layout.payload_align) { - // llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; - //} else { - // llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; - //} + llvm_fields.appendAssumeCapacity(field_llvm_val); - //// Insert padding to make the LLVM struct ABI size match the Zig union ABI size. - //if (layout.padding != 0) { - // llvm_fields[2] = dg.context.intType(8).arrayType(layout.padding); - // llvm_fields_len = 3; - //} + offset += field.ty.abiSize(target); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } - //llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); - //return llvm_union_ty; + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } }, - .Fn => { - const fn_info = ty.fnInfo(); - const sret = firstParamSRet(fn_info, target); - - var param_di_types = std.ArrayList(*llvm.DIType).init(dg.gpa); - defer param_di_types.deinit(); + .Union => { + const llvm_union_ty = try dg.llvmType(tv.ty); + const tag_and_val = tv.val.castTag(.@"union").?.data; - // Return type goes first. 
- const di_ret_ty = if (sret or !fn_info.return_type.hasRuntimeBitsIgnoreComptime()) - Type.void - else - fn_info.return_type; - try param_di_types.append(try dg.lowerDebugType(di_ret_ty)); + const target = dg.module.getTarget(); + const layout = tv.ty.unionGetLayout(target); - if (sret) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = fn_info.return_type, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - try param_di_types.append(try dg.lowerDebugType(ptr_ty)); + if (layout.payload_size == 0) { + return genTypedValue(dg, .{ + .ty = tv.ty.unionTagType().?, + .val = tag_and_val.tag, + }); } + const union_obj = tv.ty.cast(Type.Payload.Union).?.data; + const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag).?; + assert(union_obj.haveFieldTypes()); + const field_ty = union_obj.fields.values()[field_index].ty; + const payload = p: { + if (!field_ty.hasRuntimeBitsIgnoreComptime()) { + const padding_len = @intCast(c_uint, layout.payload_size); + break :p dg.context.intType(8).arrayType(padding_len).getUndef(); + } + const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); + const field_size = field_ty.abiSize(target); + if (field_size == layout.payload_size) { + break :p field; + } + const padding_len = @intCast(c_uint, layout.payload_size - field_size); + const fields: [2]*const llvm.Value = .{ + field, dg.context.intType(8).arrayType(padding_len).getUndef(), + }; + break :p dg.context.constStruct(&fields, fields.len, .True); + }; - for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + // In this case we must make an unnamed struct because LLVM does + // not support bitcasting our payload struct to the true union payload type. + // Instead we use an unnamed struct and every reference to the global + // must pointer cast to the expected type before accessing the union. + const need_unnamed = layout.most_aligned_field != field_index; - if (isByRef(param_ty)) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = param_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - try param_di_types.append(try dg.lowerDebugType(ptr_ty)); + if (layout.tag_size == 0) { + const fields: [1]*const llvm.Value = .{payload}; + if (need_unnamed) { + return dg.context.constStruct(&fields, fields.len, .False); } else { - try param_di_types.append(try dg.lowerDebugType(param_ty)); + return llvm_union_ty.constNamedStruct(&fields, fields.len); } } + const llvm_tag_value = try genTypedValue(dg, .{ + .ty = tv.ty.unionTagType().?, + .val = tag_and_val.tag, + }); + var fields: [3]*const llvm.Value = undefined; + var fields_len: c_uint = 2; + if (layout.tag_align >= layout.payload_align) { + fields = .{ llvm_tag_value, payload, undefined }; + } else { + fields = .{ payload, llvm_tag_value, undefined }; + } + if (layout.padding != 0) { + fields[2] = dg.context.intType(8).arrayType(layout.padding).getUndef(); + fields_len = 3; + } + if (need_unnamed) { + return dg.context.constStruct(&fields, fields_len, .False); + } else { + return llvm_union_ty.constNamedStruct(&fields, fields_len); + } + }, + .Vector => switch (tv.val.tag()) { + .bytes => { + // Note, sentinel is not stored even if the type has a sentinel. 
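+ // The value, however, may include it, hence the two lengths accepted
+ // by the assert below.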
+ const bytes = tv.val.castTag(.bytes).?.data;
+ const vector_len = @intCast(usize, tv.ty.arrayLen());
+ assert(vector_len == bytes.len or vector_len + 1 == bytes.len);

- const fn_di_ty = dib.createSubroutineType(
- param_di_types.items.ptr,
- @intCast(c_int, param_di_types.items.len),
- 0,
- );
- // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try dg.object.di_type_map.put(gpa, ty, fn_di_ty);
- return fn_di_ty;
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ var byte_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = bytes[i],
+ };
+
+ elem.* = try dg.genTypedValue(.{
+ .ty = elem_ty,
+ .val = Value.initPayload(&byte_payload.base),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .aggregate => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ // The value includes the sentinel in those cases.
+ const elem_vals = tv.val.castTag(.aggregate).?.data;
+ const vector_len = @intCast(usize, tv.ty.arrayLen());
+ assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .repeated => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const val = tv.val.castTag(.repeated).?.data;
+ const elem_ty = tv.ty.elemType();
+ const len = @intCast(usize, tv.ty.arrayLen());
+ const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem| {
+ elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ else => unreachable,
},
+ .ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
+ .EnumLiteral => unreachable,
+ .Void => unreachable,
+ .NoReturn => unreachable,
.Undefined => unreachable,
.Null => unreachable,
- .EnumLiteral => unreachable,
-
- .BoundFn => @panic("TODO remove BoundFn from the language"),
-
- .Frame => @panic("TODO implement lowerDebugType for Frame types"),
- .AnyFrame => @panic("TODO implement lowerDebugType for AnyFrame types"),
- }
- }
+ .BoundFn => unreachable,
+ .Opaque => unreachable,

- fn namespaceToDebugScope(dg: *DeclGen, namespace: *const Module.Namespace) !*llvm.DIScope {
- if (namespace.parent == null) {
- const di_file = try dg.object.getDIFile(dg.gpa, namespace.file_scope);
- return di_file.toScope();
+ .Frame,
+ .AnyFrame,
+ => return dg.todo("implement const of type '{}'", .{tv.ty}),
}
- const di_type = try dg.lowerDebugType(namespace.ty);
- return di_type.toScope();
- }
-
- /// This is to be used instead of void for debug info types, to avoid tripping
- /// Assertion `!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type"'
- /// when targeting CodeView (Windows).
- fn makeEmptyNamespaceDIType(dg: *DeclGen, decl: *const Module.Decl) !*llvm.DIType { - const fields: [0]*llvm.DIType = .{}; - return dg.object.di_builder.?.createStructType( - try dg.namespaceToDebugScope(decl.src_namespace), - decl.name, // TODO use fully qualified name - try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope), - decl.src_line + 1, - 0, // size in bits - 0, // align in bits - 0, // flags - null, // derived from - undefined, // TODO should be able to pass &fields, - fields.len, - 0, // run time lang - null, // vtable holder - "", // unique id - ); } const ParentPtr = struct { @@ -4085,7 +4193,7 @@ pub const FuncGen = struct { name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.lowerDebugType(ptr_ty.childType()), + try self.dg.object.lowerDebugType(ptr_ty.childType(), .full), true, // always preserve 0, // flags ); @@ -4107,7 +4215,7 @@ pub const FuncGen = struct { name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.lowerDebugType(operand_ty), + try self.dg.object.lowerDebugType(operand_ty, .full), true, // always preserve 0, // flags ); @@ -5359,7 +5467,7 @@ pub const FuncGen = struct { func.getParamName(src_index).ptr, // TODO test 0 bit args self.di_file.?, lbrace_line, - try self.dg.lowerDebugType(inst_ty), + try self.dg.object.lowerDebugType(inst_ty, .full), true, // always preserve 0, // flags self.arg_index, // includes +1 because 0 is return type @@ -7107,3 +7215,30 @@ fn backendSupportsF80(target: std.Target) bool { /// We can do this because for all types, Zig ABI alignment >= LLVM ABI /// alignment. const struct_layout_version = 2; + +/// We use the least significant bit of the pointer address to tell us +/// whether the type is fully resolved. Types that are only fwd declared +/// have the LSB flipped to a 1. +const AnnotatedDITypePtr = enum(usize) { + _, + + fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr { + const addr = @ptrToInt(di_type); + assert(@truncate(u1, addr) == 0); + return @intToEnum(AnnotatedDITypePtr, addr | 1); + } + + fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr { + const addr = @ptrToInt(di_type); + return @intToEnum(AnnotatedDITypePtr, addr); + } + + fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType { + const fixed_addr = @enumToInt(self) & ~@as(usize, 1); + return @intToPtr(*llvm.DIType, fixed_addr); + } + + fn isFwdOnly(self: AnnotatedDITypePtr) bool { + return @truncate(u1, @enumToInt(self)) != 0; + } +}; -- cgit v1.2.3
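
For illustration, a minimal standalone sketch of the pointer-tagging scheme that
`AnnotatedDITypePtr` relies on. `DummyDIType` is a hypothetical stand-in for
`*llvm.DIType`; any type with alignment >= 2 leaves the least significant
address bit free to use as a tag:

    const std = @import("std");
    const assert = std.debug.assert;

    // Stand-in for *llvm.DIType; u16 guarantees 2-byte alignment.
    const DummyDIType = struct { x: u16 };

    const AnnotatedPtr = enum(usize) {
        _,

        fn initFwd(p: *DummyDIType) AnnotatedPtr {
            const addr = @ptrToInt(p);
            assert(@truncate(u1, addr) == 0); // alignment keeps the LSB free
            return @intToEnum(AnnotatedPtr, addr | 1); // tag as forward decl
        }

        fn initFull(p: *DummyDIType) AnnotatedPtr {
            return @intToEnum(AnnotatedPtr, @ptrToInt(p)); // LSB stays 0
        }

        fn toPtr(self: AnnotatedPtr) *DummyDIType {
            // Mask off the tag bit to recover the original pointer.
            return @intToPtr(*DummyDIType, @enumToInt(self) & ~@as(usize, 1));
        }

        fn isFwdOnly(self: AnnotatedPtr) bool {
            return @truncate(u1, @enumToInt(self)) != 0;
        }
    };

    test "tagged pointer round trip" {
        var t: DummyDIType = .{ .x = 0 };
        const fwd = AnnotatedPtr.initFwd(&t);
        assert(fwd.isFwdOnly());
        assert(fwd.toPtr() == &t);

        const full = AnnotatedPtr.initFull(&t);
        assert(!full.isFwdOnly());
        assert(full.toPtr() == &t);
    }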