Diffstat (limited to 'src/Module.zig')
-rw-r--r--   src/Module.zig   158
1 file changed, 141 insertions(+), 17 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index 3892b35aea..3cae41a6a5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -848,9 +848,11 @@ pub const Struct = struct {
         // which `have_layout` does not ensure.
         fully_resolved,
     },
-    /// If true, definitely nonzero size at runtime. If false, resolving the fields
-    /// is necessary to determine whether it has bits at runtime.
-    known_has_bits: bool,
+    /// If true, has more than one possible value. However it may still be non-runtime type
+    /// if it is a comptime-only type.
+    /// If false, resolving the fields is necessary to determine whether the type has only
+    /// one possible value.
+    known_non_opv: bool,
     requires_comptime: RequiresComptime = .unknown,
 
     pub const Fields = std.StringArrayHashMapUnmanaged(Field);
@@ -898,6 +900,45 @@ pub const Struct = struct {
         };
     }
 
+    pub fn fieldSrcLoc(s: Struct, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
+        @setCold(true);
+        const tree = s.owner_decl.getFileScope().getTree(gpa) catch |err| {
+            // In this case we emit a warning + a less precise source location.
+            log.warn("unable to load {s}: {s}", .{
+                s.owner_decl.getFileScope().sub_file_path, @errorName(err),
+            });
+            return s.srcLoc();
+        };
+        const node = s.owner_decl.relativeToNodeIndex(s.node_offset);
+        const node_tags = tree.nodes.items(.tag);
+        const file = s.owner_decl.getFileScope();
+        switch (node_tags[node]) {
+            .container_decl,
+            .container_decl_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)),
+            .container_decl_two, .container_decl_two_trailing => {
+                var buffer: [2]Ast.Node.Index = undefined;
+                return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node));
+            },
+            .container_decl_arg,
+            .container_decl_arg_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)),
+
+            .tagged_union,
+            .tagged_union_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.taggedUnion(node)),
+            .tagged_union_two, .tagged_union_two_trailing => {
+                var buffer: [2]Ast.Node.Index = undefined;
+                return queryFieldSrc(tree.*, query, file, tree.taggedUnionTwo(&buffer, node));
+            },
+            .tagged_union_enum_tag,
+            .tagged_union_enum_tag_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.taggedUnionEnumTag(node)),
+
+            else => unreachable,
+        }
+    }
+
     pub fn haveFieldTypes(s: Struct) bool {
         return switch (s.status) {
             .none,
@@ -1063,6 +1104,33 @@ pub const Union = struct {
         };
     }
 
+    pub fn fieldSrcLoc(u: Union, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
+        @setCold(true);
+        const tree = u.owner_decl.getFileScope().getTree(gpa) catch |err| {
+            // In this case we emit a warning + a less precise source location.
+            log.warn("unable to load {s}: {s}", .{
+                u.owner_decl.getFileScope().sub_file_path, @errorName(err),
+            });
+            return u.srcLoc();
+        };
+        const node = u.owner_decl.relativeToNodeIndex(u.node_offset);
+        const node_tags = tree.nodes.items(.tag);
+        const file = u.owner_decl.getFileScope();
+        switch (node_tags[node]) {
+            .container_decl,
+            .container_decl_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)),
+            .container_decl_two, .container_decl_two_trailing => {
+                var buffer: [2]Ast.Node.Index = undefined;
+                return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node));
+            },
+            .container_decl_arg,
+            .container_decl_arg_trailing,
+            => return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)),
+            else => unreachable,
+        }
+    }
+
     pub fn haveFieldTypes(u: Union) bool {
         return switch (u.status) {
             .none,
@@ -1080,7 +1148,7 @@ pub const Union = struct {
     pub fn hasAllZeroBitFieldTypes(u: Union) bool {
         assert(u.haveFieldTypes());
         for (u.fields.values()) |field| {
-            if (field.ty.hasCodeGenBits()) return false;
+            if (field.ty.hasRuntimeBits()) return false;
         }
         return true;
     }
@@ -1090,7 +1158,7 @@ pub const Union = struct {
         var most_alignment: u32 = 0;
         var most_index: usize = undefined;
         for (u.fields.values()) |field, i| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -1111,7 +1179,7 @@ pub const Union = struct {
         var max_align: u32 = 0;
         if (have_tag) max_align = u.tag_ty.abiAlignment(target);
         for (u.fields.values()) |field| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -1164,7 +1232,7 @@ pub const Union = struct {
         var payload_size: u64 = 0;
         var payload_align: u32 = 0;
         for (u.fields.values()) |field, i| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -3391,7 +3459,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
         .zir_index = undefined, // set below
         .layout = .Auto,
         .status = .none,
-        .known_has_bits = undefined,
+        .known_non_opv = undefined,
         .namespace = .{
             .parent = null,
             .ty = struct_ty,
@@ -3628,7 +3696,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
 
     var type_changed = true;
     if (decl.has_tv) {
-        prev_type_has_bits = decl.ty.hasCodeGenBits();
+        prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
         type_changed = !decl.ty.eql(decl_tv.ty);
         if (decl.getFunction()) |prev_func| {
             prev_is_inline = prev_func.state == .inline_only;
@@ -3648,8 +3716,9 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
         decl.analysis = .complete;
         decl.generation = mod.generation;
 
-        const is_inline = decl_tv.ty.fnCallingConvention() == .Inline;
-        if (!is_inline and decl_tv.ty.hasCodeGenBits()) {
+        const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, src, decl.ty);
+
+        if (has_runtime_bits) {
             // We don't fully codegen the decl until later, but we do need to reserve a global
             // offset table index for it. This allows us to codegen decls out of dependency
             // order, increasing how many computations can be done in parallel.
@@ -3662,6 +3731,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
             mod.comp.bin_file.freeDecl(decl);
         }
 
+        const is_inline = decl.ty.fnCallingConvention() == .Inline;
        if (decl.is_exported) {
             const export_src = src; // TODO make this point at `export` token
             if (is_inline) {
@@ -3682,6 +3752,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
 
     decl.owns_tv = false;
     var queue_linker_work = false;
+    var is_extern = false;
     switch (decl_tv.val.tag()) {
         .variable => {
             const variable = decl_tv.val.castTag(.variable).?.data;
@@ -3698,6 +3769,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
             if (decl == owner_decl) {
                 decl.owns_tv = true;
                 queue_linker_work = true;
+                is_extern = true;
             }
         },
 
@@ -3723,7 +3795,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
     decl.analysis = .complete;
     decl.generation = mod.generation;
 
-    if (queue_linker_work and decl.ty.hasCodeGenBits()) {
+    const has_runtime_bits = is_extern or
+        (queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, src, decl.ty));
+
+    if (has_runtime_bits) {
         log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
 
         try mod.comp.bin_file.allocateDeclIndexes(decl);
@@ -4224,7 +4299,7 @@ pub fn clearDecl(
     mod.deleteDeclExports(decl);
 
     if (decl.has_tv) {
-        if (decl.ty.hasCodeGenBits()) {
+        if (decl.ty.isFnOrHasRuntimeBits()) {
             mod.comp.bin_file.freeDecl(decl);
 
             // TODO instead of a union, put this memory trailing Decl objects,
@@ -4277,7 +4352,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
     switch (mod.comp.bin_file.tag) {
         .c => {}, // this linker backend has already migrated to the new API
         else => if (decl.has_tv) {
-            if (decl.ty.hasCodeGenBits()) {
+            if (decl.ty.isFnOrHasRuntimeBits()) {
                 mod.comp.bin_file.freeDecl(decl);
             }
         },
@@ -4662,8 +4737,8 @@ pub fn createAnonymousDeclFromDeclNamed(
     new_decl.src_line = src_decl.src_line;
     new_decl.ty = typed_value.ty;
     new_decl.val = typed_value.val;
-    new_decl.align_val = Value.initTag(.null_value);
-    new_decl.linksection_val = Value.initTag(.null_value);
+    new_decl.align_val = Value.@"null";
+    new_decl.linksection_val = Value.@"null";
     new_decl.has_tv = true;
     new_decl.analysis = .complete;
     new_decl.generation = mod.generation;
@@ -4674,7 +4749,7 @@ pub fn createAnonymousDeclFromDeclNamed(
     // if the Decl is referenced by an instruction or another constant. Otherwise,
     // the Decl will be garbage collected by the `codegen_decl` task instead of sent
     // to the linker.
-    if (typed_value.ty.hasCodeGenBits()) {
+    if (typed_value.ty.isFnOrHasRuntimeBits()) {
         try mod.comp.bin_file.allocateDeclIndexes(new_decl);
         try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl });
    }
@@ -4905,6 +4980,55 @@ pub const PeerTypeCandidateSrc = union(enum) {
     }
 };
 
+const FieldSrcQuery = struct {
+    index: usize,
+    range: enum { name, type, value, alignment },
+};
+
+fn queryFieldSrc(
+    tree: Ast,
+    query: FieldSrcQuery,
+    file_scope: *File,
+    container_decl: Ast.full.ContainerDecl,
+) SrcLoc {
+    const node_tags = tree.nodes.items(.tag);
+    var field_index: usize = 0;
+    for (container_decl.ast.members) |member_node| {
+        const field = switch (node_tags[member_node]) {
+            .container_field_init => tree.containerFieldInit(member_node),
+            .container_field_align => tree.containerFieldAlign(member_node),
+            .container_field => tree.containerField(member_node),
+            else => continue,
+        };
+        if (field_index == query.index) {
+            return switch (query.range) {
+                .name => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .token_abs = field.ast.name_token },
+                },
+                .type => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.type_expr },
+                },
+                .value => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.value_expr },
+                },
+                .alignment => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.align_expr },
+                },
+            };
+        }
+        field_index += 1;
+    }
+    unreachable;
+}
+
 /// Called from `performAllTheWork`, after all AstGen workers have finished,
 /// and before the main semantic analysis loop begins.
 pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
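
Note on usage: the new FieldSrcQuery/fieldSrcLoc/queryFieldSrc machinery lets a caller point a diagnostic at one specific part of a field declaration (its name, type, default value, or alignment expression) instead of at the whole container. A minimal sketch of a hypothetical call site follows; `struct_obj`, `gpa`, `i`, and `reportFieldError` are illustrative assumptions and are not part of this commit.

    // Hypothetical caller: resolve a precise source location for field `i`
    // of a struct and hand it to an existing error-reporting path.
    const src_loc = struct_obj.fieldSrcLoc(gpa, .{
        .index = i, // field position in declaration order
        .range = .type, // point at the type expression; .name, .value, .alignment also work
    });
    reportFieldError(src_loc, "field type is not resolvable");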

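Note on the rename: known_has_bits asked "does this struct definitely occupy storage at runtime?", while known_non_opv asks "does this type have more than one possible value?". These are different questions, because a comptime-only type can have many possible values yet no runtime bits at all. A standalone sketch of the distinction (not part of this commit, runnable with `zig test`):

    const std = @import("std");

    // Exactly one possible value (OPV): zero-bit at runtime.
    const OnlyOneValue = struct { x: u0, y: void };
    // Comptime-only: zero-sized at runtime, yet NOT one-possible-value,
    // since .{ .T = u8 } and .{ .T = bool } are distinct values of this type.
    const ComptimeOnly = struct { T: type };
    // Ordinary runtime struct: many possible values and runtime bits.
    const RuntimeStruct = struct { x: u32 };

    test "runtime size is a different question from number of possible values" {
        try std.testing.expect(@sizeOf(OnlyOneValue) == 0);
        try std.testing.expect(@sizeOf(ComptimeOnly) == 0);
        try std.testing.expect(@sizeOf(RuntimeStruct) == @sizeOf(u32));
    }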