Diffstat (limited to 'src/codegen/llvm.zig')
 -rw-r--r--  src/codegen/llvm.zig | 982
 1 file changed, 956 insertions(+), 26 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c837d9a00d..fb7daa80ec 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -5,12 +5,14 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const math = std.math;
const native_endian = builtin.cpu.arch.endian();
+const DW = std.dwarf;
const llvm = @import("llvm/bindings.zig");
const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
const Module = @import("../Module.zig");
+const Package = @import("../Package.zig");
const TypedValue = @import("../TypedValue.zig");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
@@ -159,6 +161,12 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
pub const Object = struct {
llvm_module: *const llvm.Module,
+ di_builder: ?*llvm.DIBuilder,
+ /// One of these mappings:
+ /// - *Module.File => *DIFile
+ /// - *Module.Decl => *DISubprogram
+ di_map: std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DIScope),
+ di_compile_unit: ?*llvm.DICompileUnit,
context: *const llvm.Context,
target_machine: *const llvm.TargetMachine,
target_data: *const llvm.TargetData,
@@ -180,8 +188,10 @@ pub const Object = struct {
/// The backing memory for `type_map`. Periodically garbage collected after flush().
/// The code for doing the periodic GC is not yet implemented.
type_map_arena: std.heap.ArenaAllocator,
- /// The LLVM global table which holds the names corresponding to Zig errors. Note that the values
- /// are not added until flushModule, when all errors in the compilation are known.
+ di_type_map: DITypeMap,
+ /// The LLVM global table which holds the names corresponding to Zig errors.
+ /// Note that the values are not added until flushModule, when all errors in
+ /// the compilation are known.
error_name_table: ?*const llvm.Value,
pub const TypeMap = std.HashMapUnmanaged(
@@ -191,6 +201,13 @@ pub const Object = struct {
std.hash_map.default_max_load_percentage,
);
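+ /// Maps Zig types to LLVM debug info types. Keys are copied into
+ /// `type_map_arena` by `lowerDebugType` so they outlive the ephemeral
+ /// Type memory handed in by callers.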
+ pub const DITypeMap = std.HashMapUnmanaged(
+ Type,
+ *llvm.DIType,
+ Type.HashContext64,
+ std.hash_map.default_max_load_percentage,
+ );
+
pub fn create(gpa: Allocator, options: link.Options) !*Object {
const obj = try gpa.create(Object);
errdefer gpa.destroy(obj);
@@ -204,9 +221,7 @@ pub const Object = struct {
initializeLLVMTarget(options.target.cpu.arch);
- const root_nameZ = try gpa.dupeZ(u8, options.root_name);
- defer gpa.free(root_nameZ);
- const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
+ const llvm_module = llvm.Module.createWithName(options.root_name.ptr, context);
errdefer llvm_module.dispose();
const llvm_target_triple = try targetTriple(gpa, options.target);
@@ -221,6 +236,60 @@ pub const Object = struct {
return error.InvalidLlvmTriple;
}
+ llvm_module.setTarget(llvm_target_triple.ptr);
+ var opt_di_builder: ?*llvm.DIBuilder = null;
+ errdefer if (opt_di_builder) |di_builder| di_builder.dispose();
+
+ var di_compile_unit: ?*llvm.DICompileUnit = null;
+
+ if (!options.strip) {
+ switch (options.object_format) {
+ .coff => llvm_module.addModuleCodeViewFlag(),
+ else => llvm_module.addModuleDebugInfoFlag(),
+ }
+ const di_builder = llvm_module.createDIBuilder(true);
+ opt_di_builder = di_builder;
+
+ // Don't use the version string here; LLVM misparses it when it
+ // includes the git revision.
+ const producer = try std.fmt.allocPrintZ(gpa, "zig {d}.{d}.{d}", .{
+ build_options.semver.major,
+ build_options.semver.minor,
+ build_options.semver.patch,
+ });
+ defer gpa.free(producer);
+
+ // For macOS stack traces, we want to avoid having to parse the compilation unit debug
+ // info. As long as each debug info file has a path independent of the compilation unit
+ // directory (DW_AT_comp_dir), then we never have to look at the compilation unit debug
+ // info. If we provide an absolute path to LLVM here for the compilation unit debug
+ // info, LLVM will emit DWARF info that depends on DW_AT_comp_dir. To avoid this, we
+ // pass "." for the compilation unit directory. This forces each debug file to have a
+ // directory rather than be relative to DW_AT_comp_dir. According to DWARF 5, debug
+ // files will no longer reference DW_AT_comp_dir, for the purpose of being able to
+ // support the common practice of stripping all but the line number sections from an
+ // executable.
+ const compile_unit_dir = d: {
+ if (options.target.isDarwin()) break :d ".";
+ const mod = options.module orelse break :d ".";
+ break :d mod.root_pkg.root_src_directory.path orelse ".";
+ };
+ const compile_unit_dir_z = try gpa.dupeZ(u8, compile_unit_dir);
+ defer gpa.free(compile_unit_dir_z);
+
+ di_compile_unit = di_builder.createCompileUnit(
+ DW.LANG.C99,
+ di_builder.createFile(options.root_name, compile_unit_dir_z),
+ producer,
+ options.optimize_mode != .Debug,
+ "", // flags
+ 0, // runtime version
+ "", // split name
+ 0, // dwo id
+ true, // emit debug info
+ );
+ }
+
const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug)
.None
else
@@ -266,17 +335,26 @@ pub const Object = struct {
return Object{
.llvm_module = llvm_module,
+ .di_map = .{},
+ .di_builder = opt_di_builder,
+ .di_compile_unit = di_compile_unit,
.context = context,
.target_machine = target_machine,
.target_data = target_data,
.decl_map = .{},
.type_map = .{},
.type_map_arena = std.heap.ArenaAllocator.init(gpa),
+ .di_type_map = .{},
.error_name_table = null,
};
}
pub fn deinit(self: *Object, gpa: Allocator) void {
+ if (self.di_builder) |dib| {
+ dib.dispose();
+ self.di_map.deinit(gpa);
+ self.di_type_map.deinit(gpa);
+ }
self.target_data.dispose();
self.target_machine.dispose();
self.llvm_module.dispose();
@@ -357,6 +435,9 @@ pub const Object = struct {
pub fn flushModule(self: *Object, comp: *Compilation) !void {
try self.genErrorNameTable(comp);
+
+ if (self.di_builder) |dib| dib.finalize();
+
if (comp.verbose_llvm_ir) {
self.llvm_module.dump();
}
@@ -470,8 +551,9 @@ pub const Object = struct {
const target = dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
const ret_ptr = if (sret) llvm_func.getParam(0) else null;
+ const gpa = dg.gpa;
- var args = std.ArrayList(*const llvm.Value).init(dg.gpa);
+ var args = std.ArrayList(*const llvm.Value).init(gpa);
defer args.deinit();
const param_offset: c_uint = @boolToInt(ret_ptr != null);
@@ -482,8 +564,41 @@ pub const Object = struct {
try args.append(llvm_func.getParam(llvm_arg_i));
}
+ var di_file: ?*llvm.DIFile = null;
+ var di_scope: ?*llvm.DIScope = null;
+
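+ // When emitting debug info, build this function's DISubprogram up front;
+ // `airDbgStmt` and `airArg` later attach source locations and parameter
+ // variables to its scope.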
+ if (dg.object.di_builder) |dib| {
+ di_file = try dg.object.getDIFile(gpa, decl.src_namespace.file_scope);
+
+ const line_number = decl.src_line + 1;
+ const is_internal_linkage = decl.val.tag() != .extern_fn and
+ !dg.module.decl_exports.contains(decl);
+ const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+ llvm.DIFlags.NoReturn
+ else
+ 0;
+ const subprogram = dib.createFunction(
+ di_file.?.toScope(),
+ decl.name,
+ llvm_func.getValueName(),
+ di_file.?,
+ line_number,
+ try dg.lowerDebugType(decl.ty),
+ is_internal_linkage,
+ true, // is definition
+ line_number + func.lbrace_line, // scope line
+ llvm.DIFlags.StaticMember | noret_bit,
+ dg.module.comp.bin_file.options.optimize_mode != .Debug,
+ null, // decl_subprogram
+ );
+
+ llvm_func.fnSetSubprogram(subprogram);
+
+ di_scope = subprogram.toScope();
+ }
+
var fg: FuncGen = .{
- .gpa = dg.gpa,
+ .gpa = gpa,
.air = air,
.liveness = liveness,
.context = dg.context,
@@ -496,6 +611,8 @@ pub const Object = struct {
.llvm_func = llvm_func,
.blocks = .{},
.single_threaded = module.comp.bin_file.options.single_threaded,
+ .di_scope = di_scope,
+ .di_file = di_file,
};
defer fg.deinit();
@@ -599,6 +716,22 @@ pub const Object = struct {
const llvm_value = self.decl_map.get(decl) orelse return;
llvm_value.deleteGlobal();
}
+
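+ /// Returns the DIFile for `file`, creating it on first use and caching
+ /// it in `di_map`, keyed by the `Module.File` pointer.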
+ fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile {
+ const gop = try o.di_map.getOrPut(gpa, file);
+ errdefer assert(o.di_map.remove(file));
+ if (gop.found_existing) {
+ return @ptrCast(*llvm.DIFile, gop.value_ptr.*);
+ }
+ const dir_path = file.pkg.root_src_directory.path orelse ".";
+ const sub_file_path_z = try gpa.dupeZ(u8, file.sub_file_path);
+ defer gpa.free(sub_file_path_z);
+ const dir_path_z = try gpa.dupeZ(u8, dir_path);
+ defer gpa.free(dir_path_z);
+ const di_file = o.di_builder.?.createFile(sub_file_path_z, dir_path_z);
+ gop.value_ptr.* = di_file.toScope();
+ return di_file;
+ }
};
pub const DeclGen = struct {
@@ -942,15 +1075,13 @@ pub const DeclGen = struct {
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
- const child_type = t.optionalChild(&buf);
- if (!child_type.hasRuntimeBits()) {
+ const child_ty = t.optionalChild(&buf);
+ if (!child_ty.hasRuntimeBits()) {
return dg.context.intType(1);
}
- const payload_llvm_ty = try dg.llvmType(child_type);
+ const payload_llvm_ty = try dg.llvmType(child_ty);
if (t.isPtrLikeOptional()) {
return payload_llvm_ty;
- } else if (!child_type.hasRuntimeBits()) {
- return dg.context.intType(1);
}
const fields: [2]*const llvm.Type = .{
@@ -1771,6 +1902,768 @@ pub const DeclGen = struct {
}
}
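+ /// Lowers a Zig Type to an LLVM DIType, memoizing the result in
+ /// `di_type_map`. Composite types store a replaceable forward declaration
+ /// before recursing so self-referential types resolve; after any recursive
+ /// call the entry is re-inserted with `put`, since `gop` may be invalid.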
+ fn lowerDebugType(dg: *DeclGen, ty: Type) Allocator.Error!*llvm.DIType {
+ const gpa = dg.gpa;
+ // Be careful not to reference this `gop` variable after any recursive calls
+ // to `lowerDebugType`.
+ const gop = try dg.object.di_type_map.getOrPut(gpa, ty);
+ if (gop.found_existing) return gop.value_ptr.*;
+ errdefer assert(dg.object.di_type_map.remove(ty));
+ // The Type memory is ephemeral; since we want to store a longer-lived
+ // reference, we need to copy it here.
+ gop.key_ptr.* = try ty.copy(dg.object.type_map_arena.allocator());
+ const target = dg.module.getTarget();
+ const dib = dg.object.di_builder.?;
+ switch (ty.zigTypeTag()) {
+ .Void, .NoReturn => {
+ gop.value_ptr.* = dib.createBasicType("void", 0, DW.ATE.signed);
+ return gop.value_ptr.*;
+ },
+ .Int => {
+ const info = ty.intInfo(target);
+ assert(info.bits != 0);
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const dwarf_encoding: c_uint = switch (info.signedness) {
+ .signed => DW.ATE.signed,
+ .unsigned => DW.ATE.unsigned,
+ };
+ gop.value_ptr.* = dib.createBasicType(name, info.bits, dwarf_encoding);
+ return gop.value_ptr.*;
+ },
+ .Enum => {
+ const owner_decl = ty.getOwnerDecl();
+
+ if (!ty.hasRuntimeBits()) {
+ const enum_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl);
+ // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
+ // means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, enum_di_ty);
+ return enum_di_ty;
+ }
+
+ const field_names = ty.enumFields().keys();
+
+ const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len);
+ defer gpa.free(enumerators);
+
+ var buf_field_index: Value.Payload.U32 = .{
+ .base = .{ .tag = .enum_field_index },
+ .data = undefined,
+ };
+ const field_index_val = Value.initPayload(&buf_field_index.base);
+
+ for (field_names) |field_name, i| {
+ const field_name_z = try gpa.dupeZ(u8, field_name);
+ defer gpa.free(field_name_z);
+
+ buf_field_index.data = @intCast(u32, i);
+ var buf_u64: Value.Payload.U64 = undefined;
+ const field_int_val = field_index_val.enumToInt(ty, &buf_u64);
+ // See https://github.com/ziglang/zig/issues/645
+ const field_int = field_int_val.toSignedInt();
+ enumerators[i] = dib.createEnumerator(field_name_z, field_int);
+ }
+
+ const di_file = try dg.object.getDIFile(gpa, owner_decl.src_namespace.file_scope);
+ const di_scope = try dg.namespaceToDebugScope(owner_decl.src_namespace);
+
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ var buffer: Type.Payload.Bits = undefined;
+ const int_ty = ty.intTagType(&buffer);
+
+ const enum_di_ty = dib.createEnumerationType(
+ di_scope,
+ name,
+ di_file,
+ owner_decl.src_node + 1,
+ ty.abiSize(target) * 8,
+ ty.abiAlignment(target) * 8,
+ enumerators.ptr,
+ @intCast(c_int, enumerators.len),
+ try lowerDebugType(dg, int_ty),
+ "",
+ );
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, enum_di_ty);
+ return enum_di_ty;
+ },
+ .Float => {
+ const bits = ty.floatBits(target);
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ gop.value_ptr.* = dib.createBasicType(name, bits, DW.ATE.float);
+ return gop.value_ptr.*;
+ },
+ .Bool => {
+ gop.value_ptr.* = dib.createBasicType("bool", 1, DW.ATE.boolean);
+ return gop.value_ptr.*;
+ },
+ .Pointer => {
+ // Normalize everything that the debug info does not represent.
+ const ptr_info = ty.ptrInfo().data;
+
+ if (ptr_info.sentinel != null or
+ ptr_info.@"addrspace" != .generic or
+ ptr_info.bit_offset != 0 or
+ ptr_info.host_size != 0 or
+ ptr_info.@"allowzero" or
+ !ptr_info.mutable or
+ ptr_info.@"volatile" or
+ ptr_info.size == .Many or ptr_info.size == .C)
+ {
+ var payload: Type.Payload.Pointer = .{
+ .data = .{
+ .pointee_type = ptr_info.pointee_type,
+ .sentinel = null,
+ .@"align" = ptr_info.@"align",
+ .@"addrspace" = .generic,
+ .bit_offset = 0,
+ .host_size = 0,
+ .@"allowzero" = false,
+ .mutable = true,
+ .@"volatile" = false,
+ .size = switch (ptr_info.size) {
+ .Many, .C, .One => .One,
+ .Slice => .Slice,
+ },
+ },
+ };
+ const bland_ptr_ty = Type.initPayload(&payload.base);
+ const ptr_di_ty = try dg.lowerDebugType(bland_ptr_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, ptr_di_ty);
+ return ptr_di_ty;
+ }
+
+ if (ty.isSlice()) {
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&buf);
+ const len_ty = Type.usize;
+
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const di_file: ?*llvm.DIFile = null;
+ const line = 0;
+ const compile_unit_scope = dg.object.di_compile_unit.?.toScope();
+ const fwd_decl = dib.createReplaceableCompositeType(
+ DW.TAG.structure_type,
+ name.ptr,
+ compile_unit_scope,
+ di_file,
+ line,
+ );
+ gop.value_ptr.* = fwd_decl;
+
+ const ptr_size = ptr_ty.abiSize(target);
+ const ptr_align = ptr_ty.abiAlignment(target);
+ const len_size = len_ty.abiSize(target);
+ const len_align = len_ty.abiAlignment(target);
+
+ var offset: u64 = 0;
+ offset += ptr_size;
+ offset = std.mem.alignForwardGeneric(u64, offset, len_align);
+ const len_offset = offset;
+
+ const fields: [2]*llvm.DIType = .{
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "ptr",
+ di_file,
+ line,
+ ptr_size * 8, // size in bits
+ ptr_align * 8, // align in bits
+ 0, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(ptr_ty),
+ ),
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "len",
+ di_file,
+ line,
+ len_size * 8, // size in bits
+ len_align * 8, // align in bits
+ len_offset * 8, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(len_ty),
+ ),
+ };
+
+ const replacement_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
+ di_file,
+ line,
+ ty.abiSize(target) * 8, // size in bits
+ ty.abiAlignment(target) * 8, // align in bits
+ 0, // flags
+ null, // derived from
+ &fields,
+ fields.len,
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, replacement_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, replacement_di_ty);
+ return replacement_di_ty;
+ }
+
+ const elem_di_ty = try lowerDebugType(dg, ptr_info.pointee_type);
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const ptr_di_ty = dib.createPointerType(
+ elem_di_ty,
+ target.cpu.arch.ptrBitWidth(),
+ ty.ptrAlignment(target) * 8,
+ name,
+ );
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, ptr_di_ty);
+ return ptr_di_ty;
+ },
+ .Opaque => {
+ if (ty.tag() == .anyopaque) {
+ gop.value_ptr.* = dib.createBasicType("anyopaque", 0, DW.ATE.signed);
+ return gop.value_ptr.*;
+ }
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const owner_decl = ty.getOwnerDecl();
+ const opaque_di_ty = dib.createForwardDeclType(
+ DW.TAG.structure_type,
+ name,
+ try dg.namespaceToDebugScope(owner_decl.src_namespace),
+ try dg.object.getDIFile(gpa, owner_decl.src_namespace.file_scope),
+ owner_decl.src_node + 1,
+ );
+ // The recursive call to `lowerDebugType` via `namespaceToDebugScope`
+ // means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, opaque_di_ty);
+ return opaque_di_ty;
+ },
+ .Array => {
+ const array_di_ty = dib.createArrayType(
+ ty.abiSize(target) * 8,
+ ty.abiAlignment(target) * 8,
+ try lowerDebugType(dg, ty.childType()),
+ @intCast(c_int, ty.arrayLen()),
+ );
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, array_di_ty);
+ return array_di_ty;
+ },
+ .Vector => {
+ const vector_di_ty = dib.createVectorType(
+ ty.abiSize(target) * 8,
+ ty.abiAlignment(target) * 8,
+ try lowerDebugType(dg, ty.childType()),
+ ty.vectorLen(),
+ );
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, vector_di_ty);
+ return vector_di_ty;
+ },
+ .Optional => {
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ var buf: Type.Payload.ElemType = undefined;
+ const child_ty = ty.optionalChild(&buf);
+ if (!child_ty.hasRuntimeBits()) {
+ gop.value_ptr.* = dib.createBasicType(name, 1, DW.ATE.boolean);
+ return gop.value_ptr.*;
+ }
+ if (ty.isPtrLikeOptional()) {
+ const ptr_di_ty = try dg.lowerDebugType(child_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, ptr_di_ty);
+ return ptr_di_ty;
+ }
+
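+ // A non-pointer-like optional is represented as a struct of the
+ // payload ("data") followed by a "some" bool.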
+ const di_file: ?*llvm.DIFile = null;
+ const line = 0;
+ const compile_unit_scope = dg.object.di_compile_unit.?.toScope();
+ const fwd_decl = dib.createReplaceableCompositeType(
+ DW.TAG.structure_type,
+ name.ptr,
+ compile_unit_scope,
+ di_file,
+ line,
+ );
+ gop.value_ptr.* = fwd_decl;
+
+ const non_null_ty = Type.bool;
+ const payload_size = child_ty.abiSize(target);
+ const payload_align = child_ty.abiAlignment(target);
+ const non_null_size = non_null_ty.abiSize(target);
+ const non_null_align = non_null_ty.abiAlignment(target);
+
+ var offset: u64 = 0;
+ offset += payload_size;
+ offset = std.mem.alignForwardGeneric(u64, offset, non_null_align);
+ const non_null_offset = offset;
+
+ const fields: [2]*llvm.DIType = .{
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "data",
+ di_file,
+ line,
+ payload_size * 8, // size in bits
+ payload_align * 8, // align in bits
+ 0, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(child_ty),
+ ),
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "some",
+ di_file,
+ line,
+ non_null_size * 8, // size in bits
+ non_null_align * 8, // align in bits
+ non_null_offset * 8, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(non_null_ty),
+ ),
+ };
+
+ const replacement_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
+ di_file,
+ line,
+ ty.abiSize(target) * 8, // size in bits
+ ty.abiAlignment(target) * 8, // align in bits
+ 0, // flags
+ null, // derived from
+ &fields,
+ fields.len,
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, replacement_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, replacement_di_ty);
+ return replacement_di_ty;
+ },
+ .ErrorUnion => {
+ const err_set_ty = ty.errorUnionSet();
+ const payload_ty = ty.errorUnionPayload();
+ if (!payload_ty.hasRuntimeBits()) {
+ const err_set_di_ty = try dg.lowerDebugType(err_set_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, err_set_di_ty);
+ return err_set_di_ty;
+ }
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const di_file: ?*llvm.DIFile = null;
+ const line = 0;
+ const compile_unit_scope = dg.object.di_compile_unit.?.toScope();
+ const fwd_decl = dib.createReplaceableCompositeType(
+ DW.TAG.structure_type,
+ name.ptr,
+ compile_unit_scope,
+ di_file,
+ line,
+ );
+ gop.value_ptr.* = fwd_decl;
+
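+ // Mirror the memory layout: the error value at offset 0, then the
+ // payload aligned up to its own alignment.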
+ const err_set_size = err_set_ty.abiSize(target);
+ const err_set_align = err_set_ty.abiAlignment(target);
+ const payload_size = payload_ty.abiSize(target);
+ const payload_align = payload_ty.abiAlignment(target);
+
+ var offset: u64 = 0;
+ offset += err_set_size;
+ offset = std.mem.alignForwardGeneric(u64, offset, payload_align);
+ const payload_offset = offset;
+
+ const fields: [2]*llvm.DIType = .{
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "tag",
+ di_file,
+ line,
+ err_set_size * 8, // size in bits
+ err_set_align * 8, // align in bits
+ 0, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(err_set_ty),
+ ),
+ dib.createMemberType(
+ fwd_decl.toScope(),
+ "value",
+ di_file,
+ line,
+ payload_size * 8, // size in bits
+ payload_align * 8, // align in bits
+ payload_offset * 8, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(payload_ty),
+ ),
+ };
+
+ const replacement_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
+ di_file,
+ line,
+ ty.abiSize(target) * 8, // size in bits
+ ty.abiAlignment(target) * 8, // align in bits
+ 0, // flags
+ null, // derived from
+ &fields,
+ fields.len,
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, replacement_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, replacement_di_ty);
+ return replacement_di_ty;
+ },
+ .ErrorSet => {
+ // TODO make this a proper enum with all the error codes in it.
+ // will need to consider how to take incremental compilation into account.
+ gop.value_ptr.* = dib.createBasicType("anyerror", 16, DW.ATE.unsigned);
+ return gop.value_ptr.*;
+ },
+ .Struct => {
+ const compile_unit_scope = dg.object.di_compile_unit.?.toScope();
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const fwd_decl = dib.createReplaceableCompositeType(
+ DW.TAG.structure_type,
+ name.ptr,
+ compile_unit_scope,
+ null, // file
+ 0, // line
+ );
+ gop.value_ptr.* = fwd_decl;
+
+ if (ty.isTupleOrAnonStruct()) {
+ const tuple = ty.tupleFields();
+
+ var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
+ defer di_fields.deinit(gpa);
+
+ try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+
+ for (tuple.types) |field_ty, i| {
+ const field_val = tuple.values[i];
+ if (field_val.tag() != .unreachable_value) continue;
+
+ const field_size = field_ty.abiSize(target);
+ const field_align = field_ty.abiAlignment(target);
+ const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ offset = field_offset + field_size;
+
+ const field_name = if (ty.castTag(.anon_struct)) |payload|
+ try gpa.dupeZ(u8, payload.data.names[i])
+ else
+ try std.fmt.allocPrintZ(gpa, "{d}", .{i});
+ defer gpa.free(field_name);
+
+ try di_fields.append(gpa, dib.createMemberType(
+ fwd_decl.toScope(),
+ field_name,
+ null, // file
+ 0, // line
+ field_size * 8, // size in bits
+ field_align * 8, // align in bits
+ field_offset * 8, // offset in bits
+ 0, // flags
+ try dg.lowerDebugType(field_ty),
+ ));
+ }
+
+ const replacement_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
+ null, // file
+ 0, // line
+ ty.abiSize(target) * 8, // size in bits
+ ty.abiAlignment(target) * 8, // align in bits
+ 0, // flags
+ null, // derived from
+ di_fields.items.ptr,
+ @intCast(c_int, di_fields.items.len),
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, replacement_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, replacement_di_ty);
+ return replacement_di_ty;
+ }
+
+ const TODO_implement_this = true; // TODO
+ if (TODO_implement_this or !ty.hasRuntimeBits()) {
+ const owner_decl = ty.getOwnerDecl();
+ const struct_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl);
+ dib.replaceTemporary(fwd_decl, struct_di_ty);
+ // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
+ // means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, struct_di_ty);
+ return struct_di_ty;
+ }
+ @panic("TODO debug info type for struct");
+
+ //const struct_obj = ty.castTag(.@"struct").?.data;
+
+ //if (struct_obj.layout == .Packed) {
+ // var buf: Type.Payload.Bits = undefined;
+ // const int_ty = struct_obj.packedIntegerType(target, &buf);
+ // const int_llvm_ty = try dg.llvmType(int_ty);
+ // gop.value_ptr.* = int_llvm_ty;
+ // return int_llvm_ty;
+ //}
+
+ //const name = try struct_obj.getFullyQualifiedName(gpa);
+ //defer gpa.free(name);
+
+ //const llvm_struct_ty = dg.context.structCreateNamed(name);
+ //gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+
+ //assert(struct_obj.haveFieldTypes());
+
+ //var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{};
+ //defer llvm_field_types.deinit(gpa);
+
+ //try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count());
+
+ //comptime assert(struct_layout_version == 2);
+ //var offset: u64 = 0;
+ //var big_align: u32 = 0;
+
+ //for (struct_obj.fields.values()) |field| {
+ // if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+
+ // const field_align = field.normalAlignment(target);
+ // big_align = @maximum(big_align, field_align);
+ // const prev_offset = offset;
+ // offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+ // const padding_len = offset - prev_offset;
+ // if (padding_len > 0) {
+ // const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // try llvm_field_types.append(gpa, llvm_array_ty);
+ // }
+ // const field_llvm_ty = try dg.llvmType(field.ty);
+ // try llvm_field_types.append(gpa, field_llvm_ty);
+
+ // offset += field.ty.abiSize(target);
+ //}
+ //{
+ // const prev_offset = offset;
+ // offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ // const padding_len = offset - prev_offset;
+ // if (padding_len > 0) {
+ // const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // try llvm_field_types.append(gpa, llvm_array_ty);
+ // }
+ //}
+
+ //llvm_struct_ty.structSetBody(
+ // llvm_field_types.items.ptr,
+ // @intCast(c_uint, llvm_field_types.items.len),
+ // .False,
+ //);
+
+ //return llvm_struct_ty;
+ },
+ .Union => {
+ const owner_decl = ty.getOwnerDecl();
+
+ const name = try ty.nameAlloc(gpa);
+ defer gpa.free(name);
+ const fwd_decl = dib.createReplaceableCompositeType(
+ DW.TAG.structure_type,
+ name.ptr,
+ dg.object.di_compile_unit.?.toScope(),
+ null, // file
+ 0, // line
+ );
+ gop.value_ptr.* = fwd_decl;
+
+ const TODO_implement_this = true; // TODO
+ if (TODO_implement_this or !ty.hasRuntimeBits()) {
+ const union_di_ty = try dg.makeEmptyNamespaceDIType(owner_decl);
+ dib.replaceTemporary(fwd_decl, union_di_ty);
+ // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
+ // means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, union_di_ty);
+ return union_di_ty;
+ }
+
+ @panic("TODO debug info type for union");
+ //const gop = try dg.object.type_map.getOrPut(gpa, ty);
+ //if (gop.found_existing) return gop.value_ptr.*;
+
+ //// The Type memory is ephemeral; since we want to store a longer-lived
+ //// reference, we need to copy it here.
+ //gop.key_ptr.* = try ty.copy(dg.object.type_map_arena.allocator());
+
+ //const layout = ty.unionGetLayout(target);
+ //const union_obj = ty.cast(Type.Payload.Union).?.data;
+
+ //if (layout.payload_size == 0) {
+ // const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty);
+ // gop.value_ptr.* = enum_tag_llvm_ty;
+ // return enum_tag_llvm_ty;
+ //}
+
+ //const name = try union_obj.getFullyQualifiedName(gpa);
+ //defer gpa.free(name);
+
+ //const llvm_union_ty = dg.context.structCreateNamed(name);
+ //gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
+
+ //const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
+ //const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty);
+
+ //const llvm_payload_ty = ty: {
+ // if (layout.most_aligned_field_size == layout.payload_size) {
+ // break :ty llvm_aligned_field_ty;
+ // }
+ // const padding_len = @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
+ // const fields: [2]*const llvm.Type = .{
+ // llvm_aligned_field_ty,
+ // dg.context.intType(8).arrayType(padding_len),
+ // };
+ // break :ty dg.context.structType(&fields, fields.len, .True);
+ //};
+
+ //if (layout.tag_size == 0) {
+ // var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty};
+ // llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
+ // return llvm_union_ty;
+ //}
+ //const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty);
+
+ //// Put the tag before or after the payload depending on which one's
+ //// alignment is greater.
+ //var llvm_fields: [3]*const llvm.Type = undefined;
+ //var llvm_fields_len: c_uint = 2;
+
+ //if (layout.tag_align >= layout.payload_align) {
+ // llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined };
+ //} else {
+ // llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined };
+ //}
+
+ //// Insert padding to make the LLVM struct ABI size match the Zig union ABI size.
+ //if (layout.padding != 0) {
+ // llvm_fields[2] = dg.context.intType(8).arrayType(layout.padding);
+ // llvm_fields_len = 3;
+ //}
+
+ //llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False);
+ //return llvm_union_ty;
+ },
+ .Fn => {
+ const fn_info = ty.fnInfo();
+ const sret = firstParamSRet(fn_info, target);
+
+ var param_di_types = std.ArrayList(*llvm.DIType).init(dg.gpa);
+ defer param_di_types.deinit();
+
+ // Return type goes first.
+ const di_ret_ty = if (sret or !fn_info.return_type.hasRuntimeBits())
+ Type.void
+ else
+ fn_info.return_type;
+ try param_di_types.append(try dg.lowerDebugType(di_ret_ty));
+
+ if (sret) {
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = fn_info.return_type,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try param_di_types.append(try dg.lowerDebugType(ptr_ty));
+ }
+
+ for (fn_info.param_types) |param_ty| {
+ if (!param_ty.hasRuntimeBits()) continue;
+
+ if (isByRef(param_ty)) {
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = param_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try param_di_types.append(try dg.lowerDebugType(ptr_ty));
+ } else {
+ try param_di_types.append(try dg.lowerDebugType(param_ty));
+ }
+ }
+
+ const fn_di_ty = dib.createSubroutineType(
+ param_di_types.items.ptr,
+ @intCast(c_int, param_di_types.items.len),
+ 0,
+ );
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try dg.object.di_type_map.put(gpa, ty, fn_di_ty);
+ return fn_di_ty;
+ },
+ .ComptimeInt => unreachable,
+ .ComptimeFloat => unreachable,
+ .Type => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .EnumLiteral => unreachable,
+
+ .BoundFn => @panic("TODO remove BoundFn from the language"),
+
+ .Frame => @panic("TODO implement lowerDebugType for Frame types"),
+ .AnyFrame => @panic("TODO implement lowerDebugType for AnyFrame types"),
+ }
+ }
+
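+ /// A file's root namespace is scoped to its DIFile; nested namespaces are
+ /// scoped to the debug type of their enclosing container.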
+ fn namespaceToDebugScope(dg: *DeclGen, namespace: *const Module.Namespace) !*llvm.DIScope {
+ if (namespace.parent == null) {
+ const di_file = try dg.object.getDIFile(dg.gpa, namespace.file_scope);
+ return di_file.toScope();
+ }
+ const di_type = try dg.lowerDebugType(namespace.ty);
+ return di_type.toScope();
+ }
+
+ /// This is to be used instead of void for debug info types, to avoid tripping
+ /// Assertion `!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type"'
+ /// when targeting CodeView (Windows).
+ fn makeEmptyNamespaceDIType(dg: *DeclGen, decl: *const Module.Decl) !*llvm.DIType {
+ const fields: [0]*llvm.DIType = .{};
+ return dg.object.di_builder.?.createStructType(
+ try dg.namespaceToDebugScope(decl.src_namespace),
+ decl.name, // TODO use fully qualified name
+ try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope),
+ decl.src_line + 1,
+ 0, // size in bits
+ 0, // align in bits
+ 0, // flags
+ null, // derived from
+ undefined, // TODO should be able to pass &fields,
+ fields.len,
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ }
+
const ParentPtr = struct {
ty: Type,
llvm_ptr: *const llvm.Value,
@@ -2085,6 +2978,8 @@ pub const FuncGen = struct {
liveness: Liveness,
context: *const llvm.Context,
builder: *const llvm.Builder,
+ di_scope: ?*llvm.DIScope,
+ di_file: ?*llvm.DIFile,
/// This stores the LLVM values used in a function, such that they can be referred to
/// in other instructions. This table is cleared before every function is generated.
@@ -2100,7 +2995,7 @@ pub const FuncGen = struct {
/// it omits 0-bit types. If the function uses sret as the first parameter,
/// this slice does not include it.
args: []const *const llvm.Value,
- arg_index: usize,
+ arg_index: c_uint,
llvm_func: *const llvm.Value,
@@ -2330,10 +3225,7 @@ pub const FuncGen = struct {
.constant => unreachable,
.const_ty => unreachable,
.unreach => self.airUnreach(inst),
- .dbg_stmt => blk: {
- // TODO: implement debug info
- break :blk null;
- },
+ .dbg_stmt => self.airDbgStmt(inst),
// zig fmt: on
};
if (opt_value) |val| {
@@ -3043,6 +3935,17 @@ pub const FuncGen = struct {
return null;
}
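+ /// Lowers AIR `dbg_stmt` by updating the builder's current debug location;
+ /// line and column are converted from 0-based to the 1-based values that
+ /// debug info expects.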
+ fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
+ const di_scope = self.di_scope orelse return null;
+ const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
+ self.builder.setCurrentDebugLocation(
+ @intCast(c_int, self.dg.decl.src_line + dbg_stmt.line + 1),
+ @intCast(c_int, dbg_stmt.column + 1),
+ di_scope,
+ );
+ return null;
+ }
+
fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
// Eventually, the Zig compiler needs to be reworked to have inline assembly go
// through the same parsing code regardless of backend, and have LLVM-flavored
@@ -4243,15 +5146,42 @@ pub const FuncGen = struct {
self.arg_index += 1;
const inst_ty = self.air.typeOfIndex(inst);
- if (isByRef(inst_ty)) {
- // TODO declare debug variable
- return arg_val;
- } else {
- const ptr_val = self.buildAlloca(try self.dg.llvmType(inst_ty));
- _ = self.builder.buildStore(arg_val, ptr_val);
- // TODO declare debug variable
- return arg_val;
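+ // With debug info enabled, also emit a parameter variable for this
+ // argument: by-ref arguments get llvm.dbg.declare, by-value arguments
+ // get llvm.dbg.value.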
+ if (self.dg.object.di_builder) |dib| {
+ const src_index = self.getSrcArgIndex(self.arg_index - 1);
+ const func = self.dg.decl.getFunction().?;
+ const lbrace_line = func.owner_decl.src_line + func.lbrace_line + 1;
+ const lbrace_col = func.lbrace_column + 1;
+ const di_local_var = dib.createParameterVariable(
+ self.di_scope.?,
+ func.getParamName(src_index).ptr, // TODO test 0 bit args
+ self.di_file.?,
+ lbrace_line,
+ try self.dg.lowerDebugType(inst_ty),
+ true, // always preserve
+ 0, // flags
+ self.arg_index, // includes +1 because 0 is return type
+ );
+
+ const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?);
+ const insert_block = self.builder.getInsertBlock();
+ if (isByRef(inst_ty)) {
+ _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
+ } else {
+ _ = dib.insertDbgValueIntrinsicAtEnd(arg_val, di_local_var, debug_loc, insert_block);
+ }
}
+
+ return arg_val;
+ }
+
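+ /// Maps a runtime argument index back to its source parameter index by
+ /// skipping zero-bit parameters. E.g. for a hypothetical
+ /// `fn f(a: u32, b: void, c: u8)`, runtime index 1 maps to source index 2,
+ /// since `b` has no runtime bits.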
+ fn getSrcArgIndex(self: *FuncGen, runtime_index: u32) u32 {
+ const fn_info = self.dg.decl.ty.fnInfo();
+ var i: u32 = 0;
+ for (fn_info.param_types) |param_ty, src_index| {
+ if (!param_ty.hasRuntimeBits()) continue;
+ if (i == runtime_index) return @intCast(u32, src_index);
+ i += 1;
+ } else unreachable;
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -4774,7 +5704,7 @@ pub const FuncGen = struct {
const prev_debug_location = self.builder.getCurrentDebugLocation2();
defer {
self.builder.positionBuilderAtEnd(prev_block);
- if (!self.dg.module.comp.bin_file.options.strip) {
+ if (self.di_scope != null) {
self.builder.setCurrentDebugLocation2(prev_debug_location);
}
}