| author | Andrew Kelley <andrew@ziglang.org> | 2021-04-12 16:44:51 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2021-04-15 19:06:39 -0700 |
| commit | bcfebb4b2b17dcac445fc5dedbbd259cc8c2f306 (patch) | |
| tree | 48950201c23c8ccde48e2e31c91464c7e96e6163 /src | |
| parent | 429cd2b5dd27bec15a4a3351114ce1bcd12d8d01 (diff) | |
| download | zig-bcfebb4b2b17dcac445fc5dedbbd259cc8c2f306.tar.gz zig-bcfebb4b2b17dcac445fc5dedbbd259cc8c2f306.zip | |
stage2: improvements aimed at std lib integration
* AstGen: emit decl lookup ZIR instructions rather than directly
  looking up decls in AstGen. This is necessary because we want to
  reuse the same immutable ZIR code for multiple generic instantiations
  (and comptime function calls). See the first sketch after this list.
* AstGen: fix using members_len instead of fields_len for struct decls.
* structs: the struct_decl ZIR instruction is now also a block. This is
  so that the type expressions, default field value expressions, and
  alignment expressions can be evaluated in a scope that contains the
  decls from the struct namespace itself (second sketch below).
* Add "std" and "builtin" packages to the builtin package.
* Don't try to build glibc, musl, or mingw-w64 when using `-ofmt=c`.
* builtin.zig is generated without `usingnamespace`; the third sketch
  after this list shows the rough shape of the generated file.
* builtin.zig takes advantage of `std.zig.fmtId` for CPU features.
* A first pass at implementing `usingnamespace` (fourth sketch below).
  It's problematic and should be either deleted or polished before
  merging this branch.
* Sema: allow explicitly specifying the namespace in which to look up
Decls. This is used by `struct_decl` in order to put the decls from
the struct namespace itself in scope when evaluating the type
expressions, default value expressions, and alignment expressions.
* Module: fix `analyzeNamespace` assuming that it always operates on the
  file's top-level root declarations; it now receives the decl nodes to
  analyze as a parameter.
* Sema: implement the cmp operators at both comptime and runtime.
* Sema: implement peer type resolution for enums and enum literals. See
  the fifth sketch after this list.
* Pull in the changes from master branch:
262e09c482d98a78531c049a18b7f24146fe157f.
* ZIR: flesh out simple_ptr_type debug printing.
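
The following is a minimal sketch, not taken from the commit, of the kind of user code the deferred decl lookup serves; `Buffer` and `default_capacity` are invented names. AstGen now records the identifier only by name (`decl_val_named`/`decl_ref_named`) and Sema resolves it when the shared ZIR body is analyzed for each instantiation.

```zig
const std = @import("std");

// `default_capacity` is a Decl; the reference inside `Buffer` is lowered to a
// decl_val_named ZIR instruction carrying only the name, so the same
// immutable ZIR body can be re-analyzed for every instantiation.
const default_capacity = 8;

fn Buffer(comptime T: type) type {
    return struct {
        items: [default_capacity]T = undefined,
        len: usize = 0,
    };
}

test "one ZIR body, several instantiations" {
    const a = Buffer(u8){};
    const b = Buffer(f32){};
    std.debug.assert(a.items.len == default_capacity);
    std.debug.assert(b.items.len == default_capacity);
}
```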
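A second small sketch, with an invented `Header` type, of what the block-shaped `struct_decl` is meant to make possible: field types, alignments, and default values may refer to decls declared inside the struct itself.

```zig
const std = @import("std");

const Header = struct {
    pub const magic_len = 4;
    pub const version_align = 8;
    pub const default_version: u32 = 1;

    // The field type, align expression, and default values below all refer to
    // decls from Header's own namespace, which the struct_decl block puts in
    // scope while these expressions are evaluated.
    magic: [magic_len]u8 = .{ 'Z', 'I', 'G', '!' },
    version: u32 align(version_align) = default_version,
};

test "struct decls are in scope for field expressions" {
    const h = Header{};
    std.debug.assert(h.magic.len == Header.magic_len);
    std.debug.assert(h.version == Header.default_version);
}
```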
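Third, roughly the shape of a `builtin.zig` this branch generates, abridged and with illustrative values for an x86_64-linux-gnu Debug build; the real file is emitted per-target by `generateBuiltinZigSource` (stage2) and `codegen_generate_builtin_source` (stage1). Everything is qualified through `std` instead of `usingnamespace @import("std").builtin`.

```zig
// Abridged, illustrative builtin.zig: no usingnamespace, fully qualified names.
const std = @import("std");

pub const zig_is_stage2 = true;
pub const output_mode = std.builtin.OutputMode.Exe;
pub const link_mode = std.builtin.LinkMode.Static;
pub const is_test = false;
pub const single_threaded = false;
pub const abi = std.Target.Abi.gnu;
pub const cpu = std.Target.Cpu.baseline(.x86_64);
pub const os = std.Target.Os.Tag.defaultVersionRange(.linux);
pub const target = std.Target{
    .cpu = cpu,
    .os = os,
    .abi = abi,
};
pub const object_format = std.Target.ObjectFormat.elf;
pub const mode = std.builtin.Mode.Debug;
pub const link_libc = false;
pub const link_libcpp = false;
pub const code_model = std.builtin.CodeModel.default;
```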
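Fourth, a sketch of the `usingnamespace` semantics the first-pass implementation aims at, using invented names (`math_helpers`, `Api`): lookups that miss in a namespace consult the namespaces recorded in its `usingnamespace_set`, restricted to `pub` ones when crossing into another namespace.

```zig
const std = @import("std");

const math_helpers = struct {
    pub const pi_approx: f64 = 3.14159;

    pub fn square(x: f64) f64 {
        return x * x;
    }
};

const Api = struct {
    // Decls not found directly in Api are looked up through the namespaces
    // registered by usingnamespace.
    pub usingnamespace math_helpers;
};

test "usingnamespace re-exports decls" {
    std.debug.assert(Api.square(2.0) == 4.0);
    std.debug.assert(Api.pi_approx > 3.0);
}
```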
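Finally, a sketch of what the comptime comparison folding and the enum/enum-literal peer type resolution allow, using an invented `Color` enum.

```zig
const std = @import("std");

const Color = enum { red, green, blue };

test "comptime cmp and enum peer type resolution" {
    // Comparison operators on comptime-known operands now fold in Sema
    // instead of requiring a runtime block.
    comptime std.debug.assert(3 < 5);

    // Peer type resolution between an enum value and an enum literal
    // resolves to the enum type, so `c` is a Color.
    var flag = true;
    _ = &flag;
    const c = if (flag) Color.green else .blue;
    std.debug.assert(@TypeOf(c) == Color);
    std.debug.assert(c == .green);
}
```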
Diffstat (limited to 'src')
| -rw-r--r-- | src/AstGen.zig | 64 |
| -rw-r--r-- | src/Compilation.zig | 44 |
| -rw-r--r-- | src/Module.zig | 140 |
| -rw-r--r-- | src/Sema.zig | 252 |
| -rw-r--r-- | src/codegen/spirv/spec.zig | 2 |
| -rw-r--r-- | src/link.zig | 2 |
| -rw-r--r-- | src/stage1/codegen.cpp | 42 |
| -rw-r--r-- | src/value.zig | 27 |
| -rw-r--r-- | src/zir.zig | 46 |
9 files changed, 468 insertions, 151 deletions
diff --git a/src/AstGen.zig b/src/AstGen.zig index 827f545c1b..c3754db766 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -1254,6 +1254,8 @@ fn blockExprStmts( .coerce_result_ptr, .decl_ref, .decl_val, + .decl_ref_named, + .decl_val_named, .load, .div, .elem_ptr, @@ -1817,7 +1819,7 @@ pub fn structDeclInner( tag: zir.Inst.Tag, ) InnerError!zir.Inst.Ref { if (container_decl.ast.members.len == 0) { - return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0 }); + return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0, .body_len = 0 }); } const astgen = gz.astgen; @@ -1826,12 +1828,21 @@ pub fn structDeclInner( const tree = gz.tree(); const node_tags = tree.nodes.items(.tag); + // The struct_decl instruction introduces a scope in which the decls of the struct + // are in scope, so that field types, alignments, and default value expressions + // can refer to decls within the struct itself. + var block_scope: GenZir = .{ + .parent = scope, + .astgen = astgen, + .force_comptime = true, + }; + defer block_scope.instructions.deinit(gpa); + + // We don't know which members are fields until we iterate, so cannot do + // an accurate ensureCapacity yet. var fields_data = ArrayListUnmanaged(u32){}; defer fields_data.deinit(gpa); - // field_name and field_type are both mandatory - try fields_data.ensureCapacity(gpa, container_decl.ast.members.len * 2); - // We only need this if there are greater than 16 fields. var bit_bag = ArrayListUnmanaged(u32){}; defer bit_bag.deinit(gpa); @@ -1857,7 +1868,7 @@ pub fn structDeclInner( const field_name = try gz.identAsString(member.ast.name_token); fields_data.appendAssumeCapacity(field_name); - const field_type = try typeExpr(gz, scope, member.ast.type_expr); + const field_type = try typeExpr(&block_scope, &block_scope.base, member.ast.type_expr); fields_data.appendAssumeCapacity(@enumToInt(field_type)); const have_align = member.ast.align_expr != 0; @@ -1867,31 +1878,40 @@ pub fn structDeclInner( (@as(u32, @boolToInt(have_value)) << 31); if (have_align) { - const align_inst = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, member.ast.align_expr); + const align_inst = try expr(&block_scope, &block_scope.base, .{ .ty = .u32_type }, member.ast.align_expr); fields_data.appendAssumeCapacity(@enumToInt(align_inst)); } if (have_value) { - const default_inst = try comptimeExpr(gz, scope, .{ .ty = field_type }, member.ast.value_expr); + const default_inst = try expr(&block_scope, &block_scope.base, .{ .ty = field_type }, member.ast.value_expr); fields_data.appendAssumeCapacity(@enumToInt(default_inst)); } field_index += 1; } if (field_index == 0) { - return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0 }); + return gz.addPlNode(tag, node, zir.Inst.StructDecl{ .fields_len = 0, .body_len = 0 }); } const empty_slot_count = 16 - (field_index % 16); cur_bit_bag >>= @intCast(u5, empty_slot_count * 2); - const result = try gz.addPlNode(tag, node, zir.Inst.StructDecl{ - .fields_len = @intCast(u32, container_decl.ast.members.len), - }); + const decl_inst = try gz.addBlock(tag, node); + try gz.instructions.append(gpa, decl_inst); + _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); + try astgen.extra.ensureCapacity(gpa, astgen.extra.items.len + - bit_bag.items.len + 1 + fields_data.items.len); + @typeInfo(zir.Inst.StructDecl).Struct.fields.len + + bit_bag.items.len + 1 + fields_data.items.len + + block_scope.instructions.items.len); + const zir_datas = astgen.instructions.items(.data); + 
zir_datas[decl_inst].pl_node.payload_index = astgen.addExtraAssumeCapacity(zir.Inst.StructDecl{ + .body_len = @intCast(u32, block_scope.instructions.items.len), + .fields_len = @intCast(u32, field_index), + }); + astgen.extra.appendSliceAssumeCapacity(block_scope.instructions.items); astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty. astgen.extra.appendAssumeCapacity(cur_bit_bag); astgen.extra.appendSliceAssumeCapacity(fields_data.items); - return result; + return astgen.indexToRef(decl_inst); } fn containerDecl( @@ -3722,16 +3742,16 @@ fn identifier( }; } - const decl = mod.lookupIdentifier(scope, ident_name) orelse { - // TODO insert a "dependency on the non-existence of a decl" here to make this - // compile error go away when the decl is introduced. This data should be in a global - // sparse map since it is only relevant when a compile error occurs. - return mod.failNode(scope, ident, "use of undeclared identifier '{s}'", .{ident_name}); - }; - const decl_index = try mod.declareDeclDependency(astgen.decl, decl); + // We can't look up Decls until Sema because the same ZIR code is supposed to be + // used for multiple generic instantiations, and this may refer to a different Decl + // depending on the scope, determined by the generic instantiation. + const str_index = try gz.identAsString(ident_token); switch (rl) { - .ref, .none_or_ref => return gz.addDecl(.decl_ref, decl_index, ident), - else => return rvalue(gz, scope, rl, try gz.addDecl(.decl_val, decl_index, ident), ident), + .ref, .none_or_ref => return gz.addStrTok(.decl_ref_named, str_index, ident_token), + else => { + const result = try gz.addStrTok(.decl_val_named, str_index, ident_token); + return rvalue(gz, scope, rl, result, ident); + }, } } diff --git a/src/Compilation.zig b/src/Compilation.zig index eaf9b7f5b4..cef24204d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -531,7 +531,7 @@ pub const InitOptions = struct { /// is externally modified - essentially anything other than zig-cache - then /// this flag would be set to disable this machinery to avoid false positives. 
disable_lld_caching: bool = false, - object_format: ?std.builtin.ObjectFormat = null, + object_format: ?std.Target.ObjectFormat = null, optimize_mode: std.builtin.Mode = .Debug, keep_source_files_loaded: bool = false, clang_argv: []const []const u8 = &[0][]const u8{}, @@ -1041,6 +1041,10 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { try std_pkg.add(gpa, "builtin", builtin_pkg); try std_pkg.add(gpa, "root", root_pkg); + try std_pkg.add(gpa, "std", std_pkg); + + try builtin_pkg.add(gpa, "std", std_pkg); + try builtin_pkg.add(gpa, "builtin", builtin_pkg); } // TODO when we implement serialization and deserialization of incremental @@ -2993,7 +2997,8 @@ fn wantBuildLibCFromSource(comp: Compilation) bool { .Exe => true, }; return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and - comp.bin_file.options.libc_installation == null; + comp.bin_file.options.libc_installation == null and + comp.bin_file.options.object_format != .c; } fn wantBuildGLibCFromSource(comp: Compilation) bool { @@ -3017,6 +3022,7 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool { }; return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and comp.bin_file.options.libc_installation == null and + comp.bin_file.options.object_format != .c and target_util.libcNeedsLibUnwind(comp.getTarget()); } @@ -3068,26 +3074,21 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc @setEvalBranchQuota(4000); try buffer.writer().print( - \\usingnamespace @import("std").builtin; - \\/// Deprecated - \\pub const arch = Target.current.cpu.arch; - \\/// Deprecated - \\pub const endian = Target.current.cpu.arch.endian(); - \\ + \\const std = @import("std"); \\/// Zig version. When writing code that supports multiple versions of Zig, prefer \\/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks. 
- \\pub const zig_version = try @import("std").SemanticVersion.parse("{s}"); + \\pub const zig_version = try std.SemanticVersion.parse("{s}"); \\pub const zig_is_stage2 = {}; \\ - \\pub const output_mode = OutputMode.{}; - \\pub const link_mode = LinkMode.{}; + \\pub const output_mode = std.builtin.OutputMode.{}; + \\pub const link_mode = std.builtin.LinkMode.{}; \\pub const is_test = {}; \\pub const single_threaded = {}; - \\pub const abi = Abi.{}; - \\pub const cpu: Cpu = Cpu{{ + \\pub const abi = std.Target.Abi.{}; + \\pub const cpu: std.Target.Cpu = .{{ \\ .arch = .{}, - \\ .model = &Target.{}.cpu.{}, - \\ .features = Target.{}.featureSet(&[_]Target.{}.Feature{{ + \\ .model = &std.Target.{}.cpu.{}, + \\ .features = std.Target.{}.featureSet(&[_]std.Target.{}.Feature{{ \\ , .{ build_options.version, @@ -3115,7 +3116,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc try buffer.writer().print( \\ }}), \\}}; - \\pub const os = Os{{ + \\pub const os = std.Target.Os{{ \\ .tag = .{}, \\ .version_range = .{{ , @@ -3202,8 +3203,13 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc (comp.bin_file.options.skip_linker_dependencies and comp.bin_file.options.parent_compilation_link_libc); try buffer.writer().print( - \\pub const object_format = ObjectFormat.{}; - \\pub const mode = Mode.{}; + \\pub const target = std.Target{{ + \\ .cpu = cpu, + \\ .os = os, + \\ .abi = abi, + \\}}; + \\pub const object_format = std.Target.ObjectFormat.{}; + \\pub const mode = std.builtin.Mode.{}; \\pub const link_libc = {}; \\pub const link_libcpp = {}; \\pub const have_error_return_tracing = {}; @@ -3211,7 +3217,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc \\pub const position_independent_code = {}; \\pub const position_independent_executable = {}; \\pub const strip_debug_info = {}; - \\pub const code_model = CodeModel.{}; + \\pub const code_model = std.builtin.CodeModel.{}; \\ , .{ std.zig.fmtId(@tagName(comp.bin_file.options.object_format)), diff --git a/src/Module.zig b/src/Module.zig index 20cc6b3c0d..807b30c4d7 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -657,6 +657,7 @@ pub const Scope = struct { /// Direct children of the namespace. Used during an update to detect /// which decls have been added/removed from source. decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, + usingnamespace_set: std.AutoHashMapUnmanaged(*Namespace, bool) = .{}, pub fn deinit(ns: *Namespace, gpa: *Allocator) void { ns.decls.deinit(gpa); @@ -2540,6 +2541,7 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool { .code = code, .inst_map = try analysis_arena.allocator.alloc(*ir.Inst, code.instructions.len), .owner_decl = decl, + .namespace = decl.namespace, .func = null, .owner_func = null, .param_inst_list = &.{}, @@ -2560,7 +2562,73 @@ fn astgenAndSemaDecl(mod: *Module, decl: *Decl) !bool { decl.generation = mod.generation; return true; }, - .@"usingnamespace" => @panic("TODO usingnamespace decl"), + .@"usingnamespace" => { + decl.analysis = .in_progress; + + const type_expr = node_datas[decl_node].lhs; + const is_pub = blk: { + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const main_token = main_tokens[decl_node]; + break :blk (main_token > 0 and token_tags[main_token - 1] == .keyword_pub); + }; + + // A usingnamespace decl does not store any value so we can + // deinit this arena after analysis is done. 
+ var analysis_arena = std.heap.ArenaAllocator.init(mod.gpa); + defer analysis_arena.deinit(); + + var code: zir.Code = blk: { + var astgen = try AstGen.init(mod, decl, &analysis_arena.allocator); + defer astgen.deinit(); + + var gen_scope: Scope.GenZir = .{ + .force_comptime = true, + .parent = &decl.namespace.base, + .astgen = &astgen, + }; + defer gen_scope.instructions.deinit(mod.gpa); + + const ns_type = try AstGen.typeExpr(&gen_scope, &gen_scope.base, type_expr); + _ = try gen_scope.addBreak(.break_inline, 0, ns_type); + + const code = try gen_scope.finish(); + if (std.builtin.mode == .Debug and mod.comp.verbose_ir) { + code.dump(mod.gpa, "usingnamespace_type", &gen_scope.base, 0) catch {}; + } + break :blk code; + }; + defer code.deinit(mod.gpa); + + var sema: Sema = .{ + .mod = mod, + .gpa = mod.gpa, + .arena = &analysis_arena.allocator, + .code = code, + .inst_map = try analysis_arena.allocator.alloc(*ir.Inst, code.instructions.len), + .owner_decl = decl, + .namespace = decl.namespace, + .func = null, + .owner_func = null, + .param_inst_list = &.{}, + }; + var block_scope: Scope.Block = .{ + .parent = null, + .sema = &sema, + .src_decl = decl, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + }; + defer block_scope.instructions.deinit(mod.gpa); + + const ty = try sema.rootAsType(&block_scope); + try decl.namespace.usingnamespace_set.put(mod.gpa, ty.getNamespace().?, is_pub); + + decl.analysis = .complete; + decl.generation = mod.generation; + return true; + }, else => unreachable, } } @@ -2765,6 +2833,7 @@ fn astgenAndSemaFn( .code = fn_type_code, .inst_map = try fn_type_scope_arena.allocator.alloc(*ir.Inst, fn_type_code.instructions.len), .owner_decl = decl, + .namespace = decl.namespace, .func = null, .owner_func = null, .param_inst_list = &.{}, @@ -3064,6 +3133,7 @@ fn astgenAndSemaVarDecl( .code = code, .inst_map = try gen_scope_arena.allocator.alloc(*ir.Inst, code.instructions.len), .owner_decl = decl, + .namespace = decl.namespace, .func = null, .owner_func = null, .param_inst_list = &.{}, @@ -3125,6 +3195,7 @@ fn astgenAndSemaVarDecl( .code = code, .inst_map = try type_scope_arena.allocator.alloc(*ir.Inst, code.instructions.len), .owner_decl = decl, + .namespace = decl.namespace, .func = null, .owner_func = null, .param_inst_list = &.{}, @@ -3387,6 +3458,7 @@ pub fn importFile(mod: *Module, cur_pkg: *Package, import_string: []const u8) !* .code = code, .inst_map = try gen_scope_arena.allocator.alloc(*ir.Inst, code.instructions.len), .owner_decl = top_decl, + .namespace = top_decl.namespace, .func = null, .owner_func = null, .param_inst_list = &.{}, @@ -3411,7 +3483,7 @@ pub fn importFile(mod: *Module, cur_pkg: *Package, import_string: []const u8) !* struct_decl.contents_hash = top_decl.contents_hash; new_file.namespace = struct_ty.getNamespace().?; new_file.namespace.parent = null; - new_file.namespace.parent_name_hash = tmp_namespace.parent_name_hash; + //new_file.namespace.parent_name_hash = tmp_namespace.parent_name_hash; // Transfer the dependencies to `owner_decl`. 
assert(top_decl.dependants.count() == 0); @@ -3422,24 +3494,31 @@ pub fn importFile(mod: *Module, cur_pkg: *Package, import_string: []const u8) !* _ = try mod.declareDeclDependency(struct_decl, dep); } - try mod.analyzeFile(new_file); return new_file; } pub fn analyzeFile(mod: *Module, file: *Scope.File) !void { - return mod.analyzeNamespace(file.namespace); + // We call `getAstTree` here so that `analyzeFile` has the error set that includes + // file system operations, but `analyzeNamespace` does not. + const tree = try mod.getAstTree(file.namespace.file_scope); + const decls = tree.rootDecls(); + return mod.analyzeNamespace(file.namespace, decls); } -pub fn analyzeNamespace(mod: *Module, namespace: *Scope.Namespace) !void { +pub fn analyzeNamespace( + mod: *Module, + namespace: *Scope.Namespace, + decls: []const ast.Node.Index, +) InnerError!void { const tracy = trace(@src()); defer tracy.end(); // We may be analyzing it for the first time, or this may be // an incremental update. This code handles both cases. - const tree = try mod.getAstTree(namespace.file_scope); + assert(namespace.file_scope.status == .loaded_success); // Caller must ensure tree loaded. + const tree: *const ast.Tree = &namespace.file_scope.tree; const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); - const decls = tree.rootDecls(); try mod.comp.work_queue.ensureUnusedCapacity(decls.len); try namespace.decls.ensureCapacity(mod.gpa, decls.len); @@ -3612,7 +3691,20 @@ pub fn analyzeNamespace(mod: *Module, namespace: *Scope.Namespace) !void { } }, .@"usingnamespace" => { - log.err("TODO: analyze usingnamespace decl", .{}); + const name_index = mod.getNextAnonNameIndex(); + const name = try std.fmt.allocPrint(mod.gpa, "__usingnamespace_{d}", .{name_index}); + defer mod.gpa.free(name); + + const name_hash = namespace.fullyQualifiedNameHash(name); + const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node)); + + const new_decl = try mod.createNewDecl(namespace, name, decl_node, name_hash, contents_hash); + namespace.decls.putAssumeCapacity(new_decl, {}); + + mod.ensureDeclAnalyzed(new_decl) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => continue, + }; }, else => unreachable, }; @@ -3900,6 +3992,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { .code = func.zir, .inst_map = try mod.gpa.alloc(*ir.Inst, func.zir.instructions.len), .owner_decl = decl, + .namespace = decl.namespace, .func = func, .owner_func = func, .param_inst_list = param_inst_list, @@ -4001,6 +4094,10 @@ fn createNewDecl( const new_decl = try mod.allocateNewDecl(namespace, src_node, contents_hash); errdefer mod.gpa.destroy(new_decl); new_decl.name = try mem.dupeZ(mod.gpa, u8, decl_name); + log.debug("insert Decl {s} with hash {}", .{ + new_decl.name, + std.fmt.fmtSliceHexLower(&name_hash), + }); mod.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl); return new_decl; } @@ -4245,7 +4342,7 @@ fn getNextAnonNameIndex(mod: *Module) usize { pub fn lookupIdentifier(mod: *Module, scope: *Scope, ident_name: []const u8) ?*Decl { var namespace = scope.namespace(); while (true) { - if (mod.lookupInNamespace(namespace, ident_name)) |decl| { + if (mod.lookupInNamespace(namespace, ident_name, false)) |decl| { return decl; } namespace = namespace.parent orelse break; @@ -4259,9 +4356,32 @@ pub fn lookupInNamespace( mod: *Module, namespace: *Scope.Namespace, ident_name: []const u8, + only_pub_usingnamespaces: bool, ) ?*Decl { const name_hash = 
namespace.fullyQualifiedNameHash(ident_name); - return mod.decl_table.get(name_hash); + log.debug("lookup Decl {s} with hash {}", .{ + ident_name, + std.fmt.fmtSliceHexLower(&name_hash), + }); + // TODO handle decl collision with usingnamespace + // TODO the decl doing the looking up needs to create a decl dependency + // on each usingnamespace decl here. + if (mod.decl_table.get(name_hash)) |decl| { + return decl; + } + { + var it = namespace.usingnamespace_set.iterator(); + while (it.next()) |entry| { + const other_ns = entry.key; + const other_is_pub = entry.value; + if (only_pub_usingnamespaces and !other_is_pub) continue; + // TODO handle cycles + if (mod.lookupInNamespace(other_ns, ident_name, true)) |decl| { + return decl; + } + } + } + return null; } pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { diff --git a/src/Sema.zig b/src/Sema.zig index e8d3a72c64..a18c3bb20c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17,6 +17,8 @@ inst_map: []*Inst, /// and `src_decl` of `Scope.Block` is the `Decl` of the callee. /// This `Decl` owns the arena memory of this `Sema`. owner_decl: *Decl, +/// How to look up decl names. +namespace: *Scope.Namespace, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, @@ -169,7 +171,9 @@ pub fn analyzeBody( .cmp_neq => try sema.zirCmp(block, inst, .neq), .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), .decl_ref => try sema.zirDeclRef(block, inst), + .decl_ref_named => try sema.zirDeclRefNamed(block, inst), .decl_val => try sema.zirDeclVal(block, inst), + .decl_val_named => try sema.zirDeclValNamed(block, inst), .load => try sema.zirLoad(block, inst), .div => try sema.zirArithmetic(block, inst), .elem_ptr => try sema.zirElemPtr(block, inst), @@ -535,68 +539,10 @@ fn zirStructDecl( const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(zir.Inst.StructDecl, inst_data.payload_index); + const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const fields_len = extra.data.fields_len; - const bit_bags_count = std.math.divCeil(usize, fields_len, 16) catch unreachable; var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - - var fields_map: std.StringArrayHashMapUnmanaged(Module.Struct.Field) = .{}; - try fields_map.ensureCapacity(&new_decl_arena.allocator, fields_len); - - { - var field_index: usize = extra.end + bit_bags_count; - var bit_bag_index: usize = extra.end; - var cur_bit_bag: u32 = undefined; - var field_i: u32 = 0; - while (field_i < fields_len) : (field_i += 1) { - if (field_i % 16 == 0) { - cur_bit_bag = sema.code.extra[bit_bag_index]; - bit_bag_index += 1; - } - const has_align = @truncate(u1, cur_bit_bag) != 0; - cur_bit_bag >>= 1; - const has_default = @truncate(u1, cur_bit_bag) != 0; - cur_bit_bag >>= 1; - - const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[field_index]); - field_index += 1; - const field_type_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); - field_index += 1; - - // This string needs to outlive the ZIR code. - const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir); - // TODO: if we need to report an error here, use a source location - // that points to this type expression rather than the struct. - // But only resolve the source location if we need to emit a compile error. 
- const field_ty = try sema.resolveType(block, src, field_type_ref); - - const gop = fields_map.getOrPutAssumeCapacity(field_name); - assert(!gop.found_existing); - gop.entry.value = .{ - .ty = field_ty, - .abi_align = Value.initTag(.abi_align_default), - .default_val = Value.initTag(.unreachable_value), - }; - - if (has_align) { - const align_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); - field_index += 1; - // TODO: if we need to report an error here, use a source location - // that points to this alignment expression rather than the struct. - // But only resolve the source location if we need to emit a compile error. - gop.entry.value.abi_align = (try sema.resolveInstConst(block, src, align_ref)).val; - } - if (has_default) { - const default_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); - field_index += 1; - // TODO: if we need to report an error here, use a source location - // that points to this default value expression rather than the struct. - // But only resolve the source location if we need to emit a compile error. - gop.entry.value.default_val = (try sema.resolveInstConst(block, src, default_ref)).val; - } - } - } const struct_obj = try new_decl_arena.allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj); @@ -607,7 +553,7 @@ fn zirStructDecl( }); struct_obj.* = .{ .owner_decl = sema.owner_decl, - .fields = fields_map, + .fields = .{}, .node_offset = inst_data.src_node, .namespace = .{ .parent = sema.owner_decl.namespace, @@ -616,6 +562,128 @@ fn zirStructDecl( .file_scope = block.getFileScope(), }, }; + + { + const ast = std.zig.ast; + const node = sema.owner_decl.relativeToNodeIndex(inst_data.src_node); + const tree: *const ast.Tree = &struct_obj.namespace.file_scope.tree; + const node_tags = tree.nodes.items(.tag); + var buf: [2]ast.Node.Index = undefined; + const members: []const ast.Node.Index = switch (node_tags[node]) { + .container_decl, + .container_decl_trailing, + => tree.containerDecl(node).ast.members, + + .container_decl_two, + .container_decl_two_trailing, + => tree.containerDeclTwo(&buf, node).ast.members, + + .container_decl_arg, + .container_decl_arg_trailing, + => tree.containerDeclArg(node).ast.members, + + .root => tree.rootDecls(), + else => unreachable, + }; + try sema.mod.analyzeNamespace(&struct_obj.namespace, members); + } + + if (fields_len == 0) { + assert(body.len == 0); + return sema.analyzeDeclVal(block, src, new_decl); + } + + try struct_obj.fields.ensureCapacity(&new_decl_arena.allocator, fields_len); + + { + // We create a block for the field type instructions because they + // may need to reference Decls from inside the struct namespace. + // Within the field type, default value, and alignment expressions, the "owner decl" + // should be the struct itself. Thus we need a new Sema. 
+ var struct_sema: Sema = .{ + .mod = sema.mod, + .gpa = sema.mod.gpa, + .arena = &new_decl_arena.allocator, + .code = sema.code, + .inst_map = sema.inst_map, + .owner_decl = new_decl, + .namespace = &struct_obj.namespace, + .owner_func = null, + .func = null, + .param_inst_list = &.{}, + .branch_quota = sema.branch_quota, + .branch_count = sema.branch_count, + }; + + var struct_block: Scope.Block = .{ + .parent = null, + .sema = &struct_sema, + .src_decl = new_decl, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + }; + defer assert(struct_block.instructions.items.len == 0); // should all be comptime instructions + + _ = try struct_sema.analyzeBody(&struct_block, body); + + sema.branch_count = struct_sema.branch_count; + sema.branch_quota = struct_sema.branch_quota; + } + const bit_bags_count = std.math.divCeil(usize, fields_len, 16) catch unreachable; + const body_end = extra.end + body.len; + var field_index: usize = body_end + bit_bags_count; + var bit_bag_index: usize = body_end; + var cur_bit_bag: u32 = undefined; + var field_i: u32 = 0; + while (field_i < fields_len) : (field_i += 1) { + if (field_i % 16 == 0) { + cur_bit_bag = sema.code.extra[bit_bag_index]; + bit_bag_index += 1; + } + const has_align = @truncate(u1, cur_bit_bag) != 0; + cur_bit_bag >>= 1; + const has_default = @truncate(u1, cur_bit_bag) != 0; + cur_bit_bag >>= 1; + + const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[field_index]); + field_index += 1; + const field_type_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); + field_index += 1; + + // This string needs to outlive the ZIR code. + const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir); + // TODO: if we need to report an error here, use a source location + // that points to this type expression rather than the struct. + // But only resolve the source location if we need to emit a compile error. + const field_ty = try sema.resolveType(block, src, field_type_ref); + + const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); + assert(!gop.found_existing); + gop.entry.value = .{ + .ty = field_ty, + .abi_align = Value.initTag(.abi_align_default), + .default_val = Value.initTag(.unreachable_value), + }; + + if (has_align) { + const align_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); + field_index += 1; + // TODO: if we need to report an error here, use a source location + // that points to this alignment expression rather than the struct. + // But only resolve the source location if we need to emit a compile error. + gop.entry.value.abi_align = (try sema.resolveInstConst(block, src, align_ref)).val; + } + if (has_default) { + const default_ref = @intToEnum(zir.Inst.Ref, sema.code.extra[field_index]); + field_index += 1; + // TODO: if we need to report an error here, use a source location + // that points to this default value expression rather than the struct. + // But only resolve the source location if we need to emit a compile error. 
+ gop.entry.value.default_val = (try sema.resolveInstConst(block, src, default_ref)).val; + } + } + return sema.analyzeDeclVal(block, src, new_decl); } @@ -1447,6 +1515,34 @@ fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError return sema.analyzeDeclVal(block, src, decl); } +fn zirDeclRefNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst { + const inst_data = sema.code.instructions.items(.data)[inst].str_tok; + const src = inst_data.src(); + const decl_name = inst_data.get(sema.code); + const decl = try sema.lookupIdentifier(block, src, decl_name); + return sema.analyzeDeclRef(block, src, decl); +} + +fn zirDeclValNamed(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*Inst { + const inst_data = sema.code.instructions.items(.data)[inst].str_tok; + const src = inst_data.src(); + const decl_name = inst_data.get(sema.code); + const decl = try sema.lookupIdentifier(block, src, decl_name); + return sema.analyzeDeclVal(block, src, decl); +} + +fn lookupIdentifier(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, name: []const u8) !*Decl { + const mod = sema.mod; + const decl = mod.lookupIdentifier(&sema.namespace.base, name) orelse { + // TODO insert a "dependency on the non-existence of a decl" here to make this + // compile error go away when the decl is introduced. This data should be in a global + // sparse map since it is only relevant when a compile error occurs. + return mod.fail(&block.base, src, "use of undeclared identifier '{s}'", .{name}); + }; + _ = try mod.declareDeclDependency(sema.owner_decl, decl); + return decl; +} + fn zirCallNone( sema: *Sema, block: *Scope.Block, @@ -1587,6 +1683,7 @@ fn analyzeCall( .code = module_fn.zir, .inst_map = try sema.gpa.alloc(*ir.Inst, module_fn.zir.instructions.len), .owner_decl = sema.owner_decl, + .namespace = sema.owner_decl.namespace, .owner_func = sema.owner_func, .func = module_fn, .param_inst_list = casted_args, @@ -3647,7 +3744,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError "expected struct, enum, union, or opaque, found '{}'", .{container_type}, ); - if (mod.lookupInNamespace(namespace, decl_name)) |decl| { + if (mod.lookupInNamespace(namespace, decl_name, true)) |decl| { if (decl.is_pub or decl.namespace.file_scope == block.base.namespace().file_scope) { return mod.constBool(arena, src, true); } @@ -3673,7 +3770,8 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError! return mod.fail(&block.base, src, "unable to find '{s}'", .{operand}); }, else => { - // TODO: make sure this gets retried and not cached + // TODO: these errors are file system errors; make sure an update() will + // retry this and not cache the file system error, which may be transient. 
return mod.fail(&block.base, src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; @@ -4069,8 +4167,21 @@ fn zirCmp( const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - try sema.requireRuntimeBlock(block, src); // TODO try to do it at comptime - const bool_type = Type.initTag(.bool); // TODO handle vectors + + if (casted_lhs.value()) |lhs_val| { + if (casted_rhs.value()) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + return sema.mod.constInst(sema.arena, src, .{ + .ty = resolved_type, + .val = Value.initTag(.undef), + }); + } + const result = lhs_val.compare(op, rhs_val); + return sema.mod.constBool(sema.arena, src, result); + } + } + + try sema.requireRuntimeBlock(block, src); const tag: Inst.Tag = switch (op) { .lt => .cmp_lt, .lte => .cmp_lte, @@ -4079,6 +4190,7 @@ fn zirCmp( .gt => .cmp_gt, .neq => .cmp_neq, }; + const bool_type = Type.initTag(.bool); // TODO handle vectors return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } @@ -4525,7 +4637,7 @@ fn requireFunctionBlock(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void fn requireRuntimeBlock(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void { if (block.is_comptime) { - return sema.mod.fail(&block.base, src, "unable to resolve comptime value", .{}); + return sema.failWithNeededComptime(block, src); } try sema.requireFunctionBlock(block, src); } @@ -4775,7 +4887,7 @@ fn analyzeNamespaceLookup( ) InnerError!?*Inst { const mod = sema.mod; const gpa = sema.gpa; - if (mod.lookupInNamespace(namespace, decl_name)) |decl| { + if (mod.lookupInNamespace(namespace, decl_name, true)) |decl| { if (!decl.is_pub and decl.namespace.file_scope != block.getFileScope()) { const msg = msg: { const msg = try mod.errMsg(&block.base, src, "'{s}' is not marked 'pub'", .{ @@ -5639,6 +5751,14 @@ fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructi continue; } + if (chosen.ty.zigTypeTag() == .Enum and candidate.ty.zigTypeTag() == .EnumLiteral) { + continue; + } + if (chosen.ty.zigTypeTag() == .EnumLiteral and candidate.ty.zigTypeTag() == .Enum) { + chosen = candidate; + continue; + } + // TODO error notes pointing out each type return sema.mod.fail(&block.base, src, "incompatible types: '{}' and '{}'", .{ chosen.ty, candidate.ty }); } diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig index ceb62f1e5d..a014098811 100644 --- a/src/codegen/spirv/spec.zig +++ b/src/codegen/spirv/spec.zig @@ -21,7 +21,7 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS // IN THE MATERIALS. 
-const Version = @import("builtin").Version; +const Version = @import("std").builtin.Version; pub const version = Version{ .major = 1, .minor = 5, .patch = 4 }; pub const magic_number: u32 = 0x07230203; pub const Opcode = extern enum(u16) { diff --git a/src/link.zig b/src/link.zig index c0f9a50b2b..0b8e3a0b8e 100644 --- a/src/link.zig +++ b/src/link.zig @@ -30,7 +30,7 @@ pub const Options = struct { target: std.Target, output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, - object_format: std.builtin.ObjectFormat, + object_format: std.Target.ObjectFormat, optimize_mode: std.builtin.Mode, machine_code_model: std.builtin.CodeModel, root_name: []const u8, diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 6d219c517d..968caaf19b 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -8921,10 +8921,10 @@ static const char *bool_to_str(bool b) { static const char *build_mode_to_str(BuildMode build_mode) { switch (build_mode) { - case BuildModeDebug: return "Mode.Debug"; - case BuildModeSafeRelease: return "Mode.ReleaseSafe"; - case BuildModeFastRelease: return "Mode.ReleaseFast"; - case BuildModeSmallRelease: return "Mode.ReleaseSmall"; + case BuildModeDebug: return "Debug"; + case BuildModeSafeRelease: return "ReleaseSafe"; + case BuildModeFastRelease: return "ReleaseFast"; + case BuildModeSmallRelease: return "ReleaseSmall"; } zig_unreachable(); } @@ -8995,7 +8995,9 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { g->have_err_ret_tracing = detect_err_ret_tracing(g); Buf *contents = buf_alloc(); - buf_appendf(contents, "usingnamespace @import(\"std\").builtin;\n\n"); + buf_appendf(contents, + "const std = @import(\"std\");\n" + ); const char *cur_os = nullptr; { @@ -9089,19 +9091,23 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { static_assert(TargetSubsystemEfiRom == 6, ""); static_assert(TargetSubsystemEfiRuntimeDriver == 7, ""); - buf_append_str(contents, "/// Deprecated: use `std.Target.current.cpu.arch`\n"); - buf_append_str(contents, "pub const arch = Target.current.cpu.arch;\n"); - buf_append_str(contents, "/// Deprecated: use `std.Target.current.cpu.arch.endian()`\n"); - buf_append_str(contents, "pub const endian = Target.current.cpu.arch.endian();\n"); - buf_appendf(contents, "pub const output_mode = OutputMode.Obj;\n"); - buf_appendf(contents, "pub const link_mode = LinkMode.%s;\n", ZIG_QUOTE(ZIG_LINK_MODE)); + buf_appendf(contents, "pub const output_mode = std.builtin.OutputMode.Obj;\n"); + buf_appendf(contents, "pub const link_mode = std.builtin.LinkMode.%s;\n", ZIG_QUOTE(ZIG_LINK_MODE)); buf_appendf(contents, "pub const is_test = false;\n"); buf_appendf(contents, "pub const single_threaded = %s;\n", bool_to_str(g->is_single_threaded)); - buf_appendf(contents, "pub const abi = Abi.%s;\n", cur_abi); - buf_appendf(contents, "pub const cpu: Cpu = Target.Cpu.baseline(.%s);\n", cur_arch); - buf_appendf(contents, "pub const os = Target.Os.Tag.defaultVersionRange(.%s);\n", cur_os); - buf_appendf(contents, "pub const object_format = ObjectFormat.%s;\n", cur_obj_fmt); - buf_appendf(contents, "pub const mode = %s;\n", build_mode_to_str(g->build_mode)); + buf_appendf(contents, "pub const abi = std.Target.Abi.%s;\n", cur_abi); + buf_appendf(contents, "pub const cpu = std.Target.Cpu.baseline(.%s);\n", cur_arch); + buf_appendf(contents, "pub const os = std.Target.Os.Tag.defaultVersionRange(.%s);\n", cur_os); + buf_appendf(contents, + "pub const target = std.Target{\n" + " .cpu = cpu,\n" + " .os = os,\n" + " .abi = abi,\n" + "};\n" + ); + + 
buf_appendf(contents, "pub const object_format = std.Target.ObjectFormat.%s;\n", cur_obj_fmt); + buf_appendf(contents, "pub const mode = std.builtin.Mode.%s;\n", build_mode_to_str(g->build_mode)); buf_appendf(contents, "pub const link_libc = %s;\n", bool_to_str(g->link_libc)); buf_appendf(contents, "pub const link_libcpp = %s;\n", bool_to_str(g->link_libcpp)); buf_appendf(contents, "pub const have_error_return_tracing = %s;\n", bool_to_str(g->have_err_ret_tracing)); @@ -9109,13 +9115,13 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { buf_appendf(contents, "pub const position_independent_code = %s;\n", bool_to_str(g->have_pic)); buf_appendf(contents, "pub const position_independent_executable = %s;\n", bool_to_str(g->have_pie)); buf_appendf(contents, "pub const strip_debug_info = %s;\n", bool_to_str(g->strip_debug_symbols)); - buf_appendf(contents, "pub const code_model = CodeModel.default;\n"); + buf_appendf(contents, "pub const code_model = std.builtin.CodeModel.default;\n"); buf_appendf(contents, "pub const zig_is_stage2 = false;\n"); { TargetSubsystem detected_subsystem = detect_subsystem(g); if (detected_subsystem != TargetSubsystemAuto) { - buf_appendf(contents, "pub const explicit_subsystem = SubSystem.%s;\n", subsystem_to_str(detected_subsystem)); + buf_appendf(contents, "pub const explicit_subsystem = std.builtin.SubSystem.%s;\n", subsystem_to_str(detected_subsystem)); } } diff --git a/src/value.zig b/src/value.zig index 66a23692c1..a9aec47272 100644 --- a/src/value.zig +++ b/src/value.zig @@ -930,7 +930,11 @@ pub const Value = extern union { /// Asserts the value is comparable. pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool { - return order(lhs, rhs).compare(op); + return switch (op) { + .eq => lhs.eql(rhs), + .neq => !lhs.eql(rhs), + else => order(lhs, rhs).compare(op), + }; } /// Asserts the value is comparable. @@ -942,12 +946,19 @@ pub const Value = extern union { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) { - if (a_tag == .void_value or a_tag == .null_value) { - return true; - } else if (a_tag == .enum_literal) { - const a_name = a.castTag(.enum_literal).?.data; - const b_name = b.castTag(.enum_literal).?.data; - return std.mem.eql(u8, a_name, b_name); + switch (a_tag) { + .void_value, .null_value => return true, + .enum_literal => { + const a_name = a.castTag(.enum_literal).?.data; + const b_name = b.castTag(.enum_literal).?.data; + return std.mem.eql(u8, a_name, b_name); + }, + .enum_field_index => { + const a_field_index = a.castTag(.enum_field_index).?.data; + const b_field_index = b.castTag(.enum_field_index).?.data; + return a_field_index == b_field_index; + }, + else => {}, } } if (a.isType() and b.isType()) { @@ -958,7 +969,7 @@ pub const Value = extern union { const b_type = b.toType(&fib.allocator) catch unreachable; return a_type.eql(b_type); } - return compare(a, .eq, b); + return order(a, b).compare(.eq); } pub fn hash_u32(self: Value) u32 { diff --git a/src/zir.zig b/src/zir.zig index 06cf3bcf07..3d000713f2 100644 --- a/src/zir.zig +++ b/src/zir.zig @@ -294,6 +294,12 @@ pub const Inst = struct { /// Equivalent to a decl_ref followed by load. /// Uses the `pl_node` union field. `payload_index` is into `decls`. decl_val, + /// Same as `decl_ref` except instead of indexing into decls, uses + /// a name to identify the Decl. Uses the `str_tok` union field. + decl_ref_named, + /// Same as `decl_val` except instead of indexing into decls, uses + /// a name to identify the Decl. Uses the `str_tok` union field. 
+ decl_val_named, /// Load the value from a pointer. Assumes `x.*` syntax. /// Uses `un_node` field. AST node is the `x.*` syntax. load, @@ -744,6 +750,8 @@ pub const Inst = struct { .dbg_stmt_node, .decl_ref, .decl_val, + .decl_ref_named, + .decl_val_named, .load, .div, .elem_ptr, @@ -1507,17 +1515,19 @@ pub const Inst = struct { }; /// Trailing: - /// 0. has_bits: u32 // for every 16 fields + /// 0. inst: Index // for every body_len + /// 1. has_bits: u32 // for every 16 fields /// - sets of 2 bits: /// 0b0X: whether corresponding field has an align expression /// 0bX0: whether corresponding field has a default expression - /// 1. fields: { // for every fields_len + /// 2. fields: { // for every fields_len /// field_name: u32, /// field_type: Ref, /// align: Ref, // if corresponding bit is set /// default_value: Ref, // if corresponding bit is set /// } pub const StructDecl = struct { + body_len: u32, fields_len: u32, }; @@ -1792,6 +1802,8 @@ const Writer = struct { .error_value, .enum_literal, + .decl_ref_named, + .decl_val_named, => try self.writeStrTok(stream, inst), .fn_type => try self.writeFnType(stream, inst, false), @@ -1872,7 +1884,16 @@ const Writer = struct { inst: Inst.Index, ) (@TypeOf(stream).Error || error{OutOfMemory})!void { const inst_data = self.code.instructions.items(.data)[inst].ptr_type_simple; - try stream.writeAll("TODO)"); + const str_allowzero = if (inst_data.is_allowzero) "allowzero, " else ""; + const str_const = if (!inst_data.is_mutable) "const, " else ""; + const str_volatile = if (inst_data.is_volatile) "volatile, " else ""; + try self.writeInstRef(stream, inst_data.elem_type); + try stream.print(", {s}{s}{s}{s})", .{ + str_allowzero, + str_const, + str_volatile, + @tagName(inst_data.size), + }); } fn writePtrType( @@ -1991,14 +2012,27 @@ const Writer = struct { fn writeStructDecl(self: *Writer, stream: anytype, inst: Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Inst.StructDecl, inst_data.payload_index); + const body = self.code.extra[extra.end..][0..extra.data.body_len]; const fields_len = extra.data.fields_len; - const bit_bags_count = std.math.divCeil(usize, fields_len, 16) catch unreachable; + + if (fields_len == 0) { + assert(body.len == 0); + try stream.writeAll("{}, {}) "); + try self.writeSrc(stream, inst_data.src()); + return; + } try stream.writeAll("{\n"); self.indent += 2; + try self.writeBody(stream, body); - var field_index: usize = extra.end + bit_bags_count; - var bit_bag_index: usize = extra.end; + try stream.writeByteNTimes(' ', self.indent - 2); + try stream.writeAll("}, {\n"); + + const bit_bags_count = std.math.divCeil(usize, fields_len, 16) catch unreachable; + const body_end = extra.end + body.len; + var field_index: usize = body_end + bit_bags_count; + var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; while (field_i < fields_len) : (field_i += 1) { |
