Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig        104
-rw-r--r--  src/Module.zig              91
-rw-r--r--  src/Sema.zig                91
-rw-r--r--  src/ThreadPool.zig           4
-rw-r--r--  src/Zir.zig                 10
-rw-r--r--  src/arch/wasm/CodeGen.zig    2
-rw-r--r--  src/codegen/c.zig            2
-rw-r--r--  src/codegen/llvm.zig         2
-rw-r--r--  src/codegen/spirv.zig        4
-rw-r--r--  src/config.zig.in            1
-rw-r--r--  src/link.zig                 6
-rw-r--r--  src/link/Elf.zig             5
-rw-r--r--  src/main.zig                 7
-rw-r--r--  src/print_zir.zig           20
-rw-r--r--  src/test.zig                43
15 files changed, 265 insertions, 127 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 7d686b2f40..54d87faa7b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -338,6 +338,8 @@ pub const AllErrors = struct {
line: u32,
column: u32,
byte_offset: u32,
+ /// Usually one, but incremented for redundant messages.
+ count: u32 = 1,
/// Does not include the trailing newline.
source_line: ?[]const u8,
notes: []Message = &.{},
@@ -345,8 +347,21 @@ pub const AllErrors = struct {
plain: struct {
msg: []const u8,
notes: []Message = &.{},
+ /// Usually one, but incremented for redundant messages.
+ count: u32 = 1,
},
+ pub fn incrementCount(msg: *Message) void {
+ switch (msg.*) {
+ .src => |*src| {
+ src.count += 1;
+ },
+ .plain => |*plain| {
+ plain.count += 1;
+ },
+ }
+ }
+
pub fn renderToStdErr(msg: Message, ttyconf: std.debug.TTY.Config) void {
std.debug.getStderrMutex().lock();
defer std.debug.getStderrMutex().unlock();
@@ -376,7 +391,13 @@ pub const AllErrors = struct {
try stderr.writeAll(kind);
ttyconf.setColor(stderr, .Reset);
ttyconf.setColor(stderr, .Bold);
- try stderr.print(" {s}\n", .{src.msg});
+ if (src.count == 1) {
+ try stderr.print(" {s}\n", .{src.msg});
+ } else {
+ try stderr.print(" {s}", .{src.msg});
+ ttyconf.setColor(stderr, .Dim);
+ try stderr.print(" ({d} times)\n", .{src.count});
+ }
ttyconf.setColor(stderr, .Reset);
if (ttyconf != .no_color) {
if (src.source_line) |line| {
@@ -400,7 +421,13 @@ pub const AllErrors = struct {
try stderr.writeByteNTimes(' ', indent);
try stderr.writeAll(kind);
ttyconf.setColor(stderr, .Reset);
- try stderr.print(" {s}\n", .{plain.msg});
+ if (plain.count == 1) {
+ try stderr.print(" {s}\n", .{plain.msg});
+ } else {
+ try stderr.print(" {s}", .{plain.msg});
+ ttyconf.setColor(stderr, .Dim);
+ try stderr.print(" ({d} times)\n", .{plain.count});
+ }
ttyconf.setColor(stderr, .Reset);
for (plain.notes) |note| {
try note.renderToStdErrInner(ttyconf, stderr_file, "error:", .Red, indent + 4);
@@ -408,6 +435,50 @@ pub const AllErrors = struct {
},
}
}
+
+ pub const HashContext = struct {
+ pub fn hash(ctx: HashContext, key: *Message) u64 {
+ _ = ctx;
+ var hasher = std.hash.Wyhash.init(0);
+
+ switch (key.*) {
+ .src => |src| {
+ hasher.update(src.msg);
+ hasher.update(src.src_path);
+ std.hash.autoHash(&hasher, src.line);
+ std.hash.autoHash(&hasher, src.column);
+ std.hash.autoHash(&hasher, src.byte_offset);
+ },
+ .plain => |plain| {
+ hasher.update(plain.msg);
+ },
+ }
+
+ return hasher.final();
+ }
+
+ pub fn eql(ctx: HashContext, a: *Message, b: *Message) bool {
+ _ = ctx;
+ switch (a.*) {
+ .src => |a_src| switch (b.*) {
+ .src => |b_src| {
+ return mem.eql(u8, a_src.msg, b_src.msg) and
+ mem.eql(u8, a_src.src_path, b_src.src_path) and
+ a_src.line == b_src.line and
+ a_src.column == b_src.column and
+ a_src.byte_offset == b_src.byte_offset;
+ },
+ .plain => return false,
+ },
+ .plain => |a_plain| switch (b.*) {
+ .src => return false,
+ .plain => |b_plain| {
+ return mem.eql(u8, a_plain.msg, b_plain.msg);
+ },
+ },
+ }
+ }
+ };
};
pub fn deinit(self: *AllErrors, gpa: Allocator) void {
@@ -421,13 +492,25 @@ pub const AllErrors = struct {
module_err_msg: Module.ErrorMsg,
) !void {
const allocator = arena.allocator();
- const notes = try allocator.alloc(Message, module_err_msg.notes.len);
- for (notes) |*note, i| {
- const module_note = module_err_msg.notes[i];
+
+ const notes_buf = try allocator.alloc(Message, module_err_msg.notes.len);
+ var note_i: usize = 0;
+
+ // De-duplicate error notes. The main use case in mind for this is
+ // too many "note: called from here" notes when eval branch quota is reached.
+ var seen_notes = std.HashMap(
+ *Message,
+ void,
+ Message.HashContext,
+ std.hash_map.default_max_load_percentage,
+ ).init(allocator);
+
+ for (module_err_msg.notes) |module_note| {
const source = try module_note.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_note.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source.bytes, byte_offset);
const file_path = try module_note.src_loc.file_scope.fullPath(allocator);
+ const note = &notes_buf[note_i];
note.* = .{
.src = .{
.src_path = file_path,
@@ -438,6 +521,12 @@ pub const AllErrors = struct {
.source_line = try allocator.dupe(u8, loc.source_line),
},
};
+ const gop = try seen_notes.getOrPut(note);
+ if (gop.found_existing) {
+ gop.key_ptr.*.incrementCount();
+ } else {
+ note_i += 1;
+ }
}
if (module_err_msg.src_loc.lazy == .entire_file) {
try errors.append(.{
@@ -458,7 +547,7 @@ pub const AllErrors = struct {
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
- .notes = notes,
+ .notes = notes_buf[0..note_i],
.source_line = try allocator.dupe(u8, loc.source_line),
},
});
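The hunks above de-duplicate notes by content: a std.HashMap keyed on *Message with the custom HashContext collapses repeats into a single entry whose count is bumped. A minimal standalone sketch of the same pattern, using a hypothetical Note type and NoteContext rather than the compiler's real Message:

const std = @import("std");

// Hypothetical stand-in for AllErrors.Message, reduced to what the
// de-duplication needs.
const Note = struct {
    msg: []const u8,
    count: u32 = 1,
};

// Content-based hashing, mirroring Message.HashContext.
const NoteContext = struct {
    pub fn hash(ctx: NoteContext, key: *Note) u64 {
        _ = ctx;
        return std.hash.Wyhash.hash(0, key.msg);
    }
    pub fn eql(ctx: NoteContext, a: *Note, b: *Note) bool {
        _ = ctx;
        return std.mem.eql(u8, a.msg, b.msg);
    }
};

test "redundant notes collapse into a count" {
    var notes = [_]Note{
        .{ .msg = "called from here" },
        .{ .msg = "called from here" },
        .{ .msg = "called from here" },
    };
    var seen = std.HashMap(
        *Note,
        void,
        NoteContext,
        std.hash_map.default_max_load_percentage,
    ).init(std.testing.allocator);
    defer seen.deinit();

    var unique: usize = 0;
    for (notes) |*note| {
        const gop = try seen.getOrPut(note);
        if (gop.found_existing) {
            // Same content as an earlier note: bump its count instead of keeping it.
            gop.key_ptr.*.count += 1;
        } else {
            unique += 1;
        }
    }
    try std.testing.expectEqual(@as(usize, 1), unique);
    try std.testing.expectEqual(@as(u32, 3), notes[0].count);
}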
@@ -762,7 +851,6 @@ pub const InitOptions = struct {
linker_z_notext: bool = false,
linker_z_defs: bool = false,
linker_z_origin: bool = false,
- linker_z_noexecstack: bool = false,
linker_z_now: bool = true,
linker_z_relro: bool = true,
linker_z_nocopyreloc: bool = false,
@@ -1602,7 +1690,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.z_defs = options.linker_z_defs,
.z_origin = options.linker_z_origin,
.z_nocopyreloc = options.linker_z_nocopyreloc,
- .z_noexecstack = options.linker_z_noexecstack,
.z_now = options.linker_z_now,
.z_relro = options.linker_z_relro,
.tsaware = options.linker_tsaware,
@@ -2350,7 +2437,6 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
man.hash.add(comp.bin_file.options.z_defs);
man.hash.add(comp.bin_file.options.z_origin);
man.hash.add(comp.bin_file.options.z_nocopyreloc);
- man.hash.add(comp.bin_file.options.z_noexecstack);
man.hash.add(comp.bin_file.options.z_now);
man.hash.add(comp.bin_file.options.z_relro);
man.hash.add(comp.bin_file.options.hash_style);
diff --git a/src/Module.zig b/src/Module.zig
index 12d311046a..f03ba77a39 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -659,7 +659,7 @@ pub const Decl = struct {
}
pub fn nodeSrcLoc(decl: Decl, node_index: Ast.Node.Index) LazySrcLoc {
- return .{ .node_offset = decl.nodeIndexToRelative(node_index) };
+ return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index));
}
pub fn srcLoc(decl: Decl) SrcLoc {
@@ -670,7 +670,7 @@ pub const Decl = struct {
return .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
- .lazy = .{ .node_offset = node_offset },
+ .lazy = LazySrcLoc.nodeOffset(node_offset),
};
}
@@ -861,7 +861,7 @@ pub const ErrorSet = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
@@ -947,7 +947,7 @@ pub const Struct = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = s.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(s.node_offset),
};
}
@@ -1066,7 +1066,7 @@ pub const EnumSimple = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
};
@@ -1097,7 +1097,7 @@ pub const EnumNumbered = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
};
@@ -1131,7 +1131,7 @@ pub const EnumFull = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
};
@@ -1197,7 +1197,7 @@ pub const Union = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
@@ -1404,7 +1404,7 @@ pub const Opaque = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = .{ .node_offset = self.node_offset },
+ .lazy = LazySrcLoc.nodeOffset(self.node_offset),
};
}
@@ -2105,7 +2105,17 @@ pub const SrcLoc = struct {
const token_starts = tree.tokens.items(.start);
return token_starts[tok_index];
},
- .node_offset, .node_offset_bin_op => |node_off| {
+ .node_offset => |traced_off| {
+ const node_off = traced_off.x;
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node = src_loc.declRelativeToNodeIndex(node_off);
+ assert(src_loc.file_scope.tree_loaded);
+ const main_tokens = tree.nodes.items(.main_token);
+ const tok_index = main_tokens[node];
+ const token_starts = tree.tokens.items(.start);
+ return token_starts[tok_index];
+ },
+ .node_offset_bin_op => |node_off| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.declRelativeToNodeIndex(node_off);
assert(src_loc.file_scope.tree_loaded);
@@ -2515,6 +2525,15 @@ pub const SrcLoc = struct {
}
};
+/// This wraps a simple integer in debug builds so that later on we can find out
+/// where in semantic analysis the value got set.
+const TracedOffset = struct {
+ x: i32,
+ trace: std.debug.Trace = .{},
+
+ const want_tracing = build_options.value_tracing;
+};
+
/// Resolving a source location into a byte offset may require doing work
/// that we would rather not do unless the error actually occurs.
/// Therefore we need a data structure that contains the information necessary
@@ -2555,7 +2574,7 @@ pub const LazySrcLoc = union(enum) {
/// The source location points to an AST node, which is this value offset
/// from its containing Decl node AST index.
/// The Decl is determined contextually.
- node_offset: i32,
+ node_offset: TracedOffset,
/// The source location points to two tokens left of the first token of an AST node,
/// which is this value offset from its containing Decl node AST index.
/// The Decl is determined contextually.
@@ -2705,6 +2724,18 @@ pub const LazySrcLoc = union(enum) {
/// The Decl is determined contextually.
node_offset_array_type_elem: i32,
+ pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
+
+ noinline fn nodeOffsetDebug(node_offset: i32) LazySrcLoc {
+ var result: LazySrcLoc = .{ .node_offset = .{ .x = node_offset } };
+ result.node_offset.trace.addAddr(@returnAddress(), "init");
+ return result;
+ }
+
+ fn nodeOffsetRelease(node_offset: i32) LazySrcLoc {
+ return .{ .node_offset = .{ .x = node_offset } };
+ }
+
/// Upgrade to a `SrcLoc` based on the `Decl` provided.
pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc {
return switch (lazy) {
@@ -4014,7 +4045,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
const body = zir.extra[extra.end..][0..extra.data.body_len];
const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand;
try wip_captures.finalize();
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
const decl_tv = try sema.resolveInstValue(&block_scope, src, result_ref);
const decl_align: u32 = blk: {
const align_ref = decl.zirAlignRef();
@@ -5044,7 +5075,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors.
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
sema.resolveFnTypes(&inner_block, src, fn_ty_info) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
@@ -5338,7 +5369,7 @@ pub const SwitchProngSrc = union(enum) {
log.warn("unable to load {s}: {s}", .{
decl.getFileScope().sub_file_path, @errorName(err),
});
- return LazySrcLoc{ .node_offset = 0 };
+ return LazySrcLoc.nodeOffset(0);
};
const switch_node = decl.relativeToNodeIndex(switch_node_offset);
const main_tokens = tree.nodes.items(.main_token);
@@ -5367,17 +5398,17 @@ pub const SwitchProngSrc = union(enum) {
node_tags[case.ast.values[0]] == .switch_range;
switch (prong_src) {
- .scalar => |i| if (!is_multi and i == scalar_i) return LazySrcLoc{
- .node_offset = decl.nodeIndexToRelative(case.ast.values[0]),
- },
+ .scalar => |i| if (!is_multi and i == scalar_i) return LazySrcLoc.nodeOffset(
+ decl.nodeIndexToRelative(case.ast.values[0]),
+ ),
.multi => |s| if (is_multi and s.prong == multi_i) {
var item_i: u32 = 0;
for (case.ast.values) |item_node| {
if (node_tags[item_node] == .switch_range) continue;
- if (item_i == s.item) return LazySrcLoc{
- .node_offset = decl.nodeIndexToRelative(item_node),
- };
+ if (item_i == s.item) return LazySrcLoc.nodeOffset(
+ decl.nodeIndexToRelative(item_node),
+ );
item_i += 1;
} else unreachable;
},
@@ -5387,15 +5418,15 @@ pub const SwitchProngSrc = union(enum) {
if (node_tags[range] != .switch_range) continue;
if (range_i == s.item) switch (range_expand) {
- .none => return LazySrcLoc{
- .node_offset = decl.nodeIndexToRelative(range),
- },
- .first => return LazySrcLoc{
- .node_offset = decl.nodeIndexToRelative(node_datas[range].lhs),
- },
- .last => return LazySrcLoc{
- .node_offset = decl.nodeIndexToRelative(node_datas[range].rhs),
- },
+ .none => return LazySrcLoc.nodeOffset(
+ decl.nodeIndexToRelative(range),
+ ),
+ .first => return LazySrcLoc.nodeOffset(
+ decl.nodeIndexToRelative(node_datas[range].lhs),
+ ),
+ .last => return LazySrcLoc.nodeOffset(
+ decl.nodeIndexToRelative(node_datas[range].rhs),
+ ),
};
range_i += 1;
} else unreachable;
@@ -5450,7 +5481,7 @@ pub const PeerTypeCandidateSrc = union(enum) {
log.warn("unable to load {s}: {s}", .{
decl.getFileScope().sub_file_path, @errorName(err),
});
- return LazySrcLoc{ .node_offset = 0 };
+ return LazySrcLoc.nodeOffset(0);
};
const node = decl.relativeToNodeIndex(node_offset);
const node_datas = tree.nodes.items(.data);
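Module.zig's TracedOffset/nodeOffset change follows a general pattern: wrap a value in a struct that, when value tracing is compiled in, records where it was constructed via std.debug.Trace. A sketch of that pattern with a hypothetical TracedId type (the build_options import and its value_tracing flag are assumptions matching config.zig.in below):

const std = @import("std");
const build_options = @import("build_options"); // assumed to expose `value_tracing`

const TracedId = struct {
    x: u32,
    trace: std.debug.Trace = .{},

    const want_tracing = build_options.value_tracing;

    // Comptime-select the constructor so release builds skip tracing entirely.
    pub const init = if (want_tracing) initDebug else initRelease;

    // noinline so @returnAddress() points at the caller, i.e. the site that
    // produced the value, not an inlined frame.
    noinline fn initDebug(x: u32) TracedId {
        var result: TracedId = .{ .x = x };
        result.trace.addAddr(@returnAddress(), "init");
        return result;
    }

    fn initRelease(x: u32) TracedId {
        return .{ .x = x };
    }
};

When a suspicious value shows up later, the addresses recorded in `trace` identify the construction site, which matches TracedOffset's stated intent ("find out where in semantic analysis the value got set").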
diff --git a/src/Sema.zig b/src/Sema.zig
index 738098dd83..048a702e7b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1154,7 +1154,7 @@ fn analyzeBodyInner(
.repeat => {
if (block.is_comptime) {
// Send comptime control flow back to the beginning of this block.
- const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
+ const src = LazySrcLoc.nodeOffset(datas[inst].node);
try sema.emitBackwardBranch(block, src);
if (wip_captures.scope.captures.count() != orig_captures) {
try wip_captures.reset(parent_capture_scope);
@@ -1165,14 +1165,14 @@ fn analyzeBodyInner(
continue;
} else {
const src_node = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
+ const src = LazySrcLoc.nodeOffset(src_node);
try sema.requireRuntimeBlock(block, src);
break always_noreturn;
}
},
.repeat_inline => {
// Send comptime control flow back to the beginning of this block.
- const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
+ const src = LazySrcLoc.nodeOffset(datas[inst].node);
try sema.emitBackwardBranch(block, src);
if (wip_captures.scope.captures.count() != orig_captures) {
try wip_captures.reset(parent_capture_scope);
@@ -2087,7 +2087,7 @@ fn zirStructDecl(
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
- break :blk .{ .node_offset = node_offset };
+ break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
@@ -2108,7 +2108,7 @@ fn zirStructDecl(
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2210,7 +2210,7 @@ fn zirEnumDecl(
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
- break :blk .{ .node_offset = node_offset };
+ break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const tag_type_ref = if (small.has_tag_type) blk: {
@@ -2263,7 +2263,7 @@ fn zirEnumDecl(
.tag_ty_inferred = true,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -2385,8 +2385,8 @@ fn zirEnumDecl(
const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
const tree = try sema.getAstTree(block);
- const field_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset, field_i);
- const other_tag_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset, gop.index);
+ const field_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset.x, field_i);
+ const other_tag_src = enumFieldSrcLoc(sema.mod.declPtr(block.src_decl), tree.*, src.node_offset.x, gop.index);
const msg = msg: {
const msg = try sema.errMsg(block, field_src, "duplicate enum tag", .{});
errdefer msg.destroy(gpa);
@@ -2442,7 +2442,7 @@ fn zirUnionDecl(
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
- break :blk .{ .node_offset = node_offset };
+ break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
extra_index += @boolToInt(small.has_tag_type);
@@ -2480,7 +2480,7 @@ fn zirUnionDecl(
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2516,7 +2516,7 @@ fn zirOpaqueDecl(
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
extra_index += 1;
- break :blk .{ .node_offset = node_offset };
+ break :blk LazySrcLoc.nodeOffset(node_offset);
} else sema.src;
const decls_len = if (small.has_decls_len) blk: {
@@ -2547,7 +2547,7 @@ fn zirOpaqueDecl(
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -2623,7 +2623,7 @@ fn zirRetPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = inst_data };
+ const src = LazySrcLoc.nodeOffset(inst_data);
try sema.requireFunctionBlock(block, src);
if (block.is_comptime or try sema.typeRequiresComptime(block, src, sema.fn_ret_ty)) {
@@ -2661,7 +2661,7 @@ fn zirRetType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = inst_data };
+ const src = LazySrcLoc.nodeOffset(inst_data);
try sema.requireFunctionBlock(block, src);
return sema.addType(sema.fn_ret_ty);
}
@@ -2750,7 +2750,7 @@ fn zirAllocExtended(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const ty_src = src; // TODO better source location
const align_src = src; // TODO better source location
const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
@@ -2903,7 +2903,7 @@ fn zirAllocInferredComptime(
inferred_alloc_ty: Type,
) CompileError!Air.Inst.Ref {
const src_node = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
+ const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
return sema.addConstant(
inferred_alloc_ty,
@@ -2967,7 +2967,7 @@ fn zirAllocInferred(
defer tracy.end();
const src_node = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
+ const src = LazySrcLoc.nodeOffset(src_node);
sema.src = src;
if (block.is_comptime) {
@@ -3718,7 +3718,7 @@ fn zirValidateArrayInit(
outer: for (instrs) |elem_ptr, i| {
const elem_ptr_data = sema.code.instructions.items(.data)[elem_ptr].pl_node;
- const elem_src: LazySrcLoc = .{ .node_offset = elem_ptr_data.src_node };
+ const elem_src = LazySrcLoc.nodeOffset(elem_ptr_data.src_node);
// Determine whether the value stored to this pointer is comptime-known.
@@ -4203,7 +4203,7 @@ fn zirCompileLog(
const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
const src_node = extra.data.src_node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
+ const src = LazySrcLoc.nodeOffset(src_node);
const args = sema.code.refSlice(extra.end, extended.small);
for (args) |arg_ref, i| {
@@ -4707,7 +4707,7 @@ pub fn analyzeExport(
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
if (alignment > 256) {
return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
@@ -5312,7 +5312,7 @@ fn analyzeCall(
delete_memoized_call_key = true;
}
- try sema.emitBackwardBranch(&child_block, call_src);
+ try sema.emitBackwardBranch(block, call_src);
// Whether this call should be memoized, set to false if the call can mutate
// comptime state.
@@ -6988,7 +6988,7 @@ fn funcCommon(
const param_types = try sema.arena.alloc(Type, block.params.items.len);
const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
for (block.params.items) |param, i| {
- const param_src: LazySrcLoc = .{ .node_offset = src_node_offset }; // TODO better src
+ const param_src = LazySrcLoc.nodeOffset(src_node_offset); // TODO better src
param_types[i] = param.ty;
comptime_params[i] = param.is_comptime or
try sema.typeRequiresComptime(block, param_src, param.ty);
@@ -7378,7 +7378,7 @@ fn zirFieldCallBindNamed(sema: *Sema, block: *Block, extended: Zir.Inst.Extended
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.FieldNamedNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const object_ptr = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
@@ -10088,7 +10088,7 @@ fn zirOverflowArithmetic(
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
@@ -11309,7 +11309,7 @@ fn zirAsm(
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
const outputs_len = @truncate(u5, extended.small);
const inputs_len = @truncate(u5, extended.small >> 5);
@@ -11761,7 +11761,7 @@ fn zirThis(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const this_decl_index = block.namespace.getDeclIndex();
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
return sema.analyzeDeclVal(block, src, this_decl_index);
}
@@ -11815,7 +11815,7 @@ fn zirRetAddr(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
try sema.requireRuntimeBlock(block, src);
return try block.addNoOp(.ret_addr);
}
@@ -11825,7 +11825,7 @@ fn zirFrameAddress(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
try sema.requireRuntimeBlock(block, src);
return try block.addNoOp(.frame_addr);
}
@@ -11838,7 +11838,7 @@ fn zirBuiltinSrc(
const tracy = trace(@src());
defer tracy.end();
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
const extra = sema.code.extraData(Zir.Inst.LineColumn, extended.operand).data;
const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
const fn_owner_decl = sema.mod.declPtr(func.owner_decl);
@@ -12842,7 +12842,7 @@ fn zirTypeofPeer(
defer tracy.end();
const extra = sema.code.extraData(Zir.Inst.TypeOfPeer, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const body = sema.code.extra[extra.data.body_index..][0..extra.data.body_len];
var child_block: Block = .{
@@ -14157,7 +14157,7 @@ fn zirErrorReturnTrace(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
return sema.getErrorReturnTrace(block, src);
}
@@ -14185,7 +14185,7 @@ fn zirFrame(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
return sema.fail(block, src, "TODO: Sema.zirFrame", .{});
}
@@ -14629,7 +14629,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.tag_ty_inferred = false,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -14711,7 +14711,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -14763,7 +14763,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
.status = .have_field_types,
@@ -14930,7 +14930,7 @@ fn reifyStruct(
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset,
+ .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
.status = .have_field_types,
@@ -15130,7 +15130,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
@@ -17114,7 +17114,7 @@ fn zirAwaitNosuspend(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
return sema.fail(block, src, "TODO: Sema.zirAwaitNosuspend", .{});
}
@@ -17443,7 +17443,7 @@ fn zirWasmMemorySize(
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
- const builtin_src: LazySrcLoc = .{ .node_offset = extra.node };
+ const builtin_src = LazySrcLoc.nodeOffset(extra.node);
const target = sema.mod.getTarget();
if (!target.isWasm()) {
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
@@ -17466,7 +17466,7 @@ fn zirWasmMemoryGrow(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
- const builtin_src: LazySrcLoc = .{ .node_offset = extra.node };
+ const builtin_src = LazySrcLoc.nodeOffset(extra.node);
const index_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const delta_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const target = sema.mod.getTarget();
@@ -17534,7 +17534,7 @@ fn zirBuiltinExtern(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
@@ -18061,7 +18061,6 @@ fn safetyPanic(
fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
sema.branch_count += 1;
if (sema.branch_count > sema.branch_quota) {
- // TODO show the "called from here" stack
return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota});
}
}
@@ -23586,7 +23585,7 @@ fn semaStructFields(
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src: LazySrcLoc = .{ .node_offset = struct_obj.node_offset };
+ const src = LazySrcLoc.nodeOffset(struct_obj.node_offset);
extra_index += @boolToInt(small.has_src_node);
const body_len = if (small.has_body_len) blk: {
@@ -23773,7 +23772,7 @@ fn semaUnionFields(block: *Block, mod: *Module, union_obj: *Module.Union) Compil
const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset };
+ const src = LazySrcLoc.nodeOffset(union_obj.node_offset);
extra_index += @boolToInt(small.has_src_node);
const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
@@ -24459,7 +24458,7 @@ fn enumFieldSrcLoc(
.container_field,
=> {
if (it_index == field_index) {
- return .{ .node_offset = decl.nodeIndexToRelative(member_node) };
+ return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(member_node));
}
it_index += 1;
},
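Sema.zig's quota-related changes (emitBackwardBranch taking the caller's block rather than &child_block, and the dropped TODO about the "called from here" stack) pair with the note de-duplication above. A hypothetical reproduction; the quota value (1000 by default) and the exact note count depend on the program:

fn recurse(n: u32) u32 {
    return if (n == 0) 0 else 1 + recurse(n - 1);
}

comptime {
    // Each comptime call counts as a backwards branch, so this overruns the
    // default quota with the same call site on every stack frame.
    _ = recurse(2000);
}

Previously the resulting error carried one "note: called from here" per frame; with this change the identical notes collapse into a single dimmed "called from here (N times)".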
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index 7d1c8420af..55e40ea287 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -49,6 +49,10 @@ pub fn deinit(self: *ThreadPool) void {
}
fn join(self: *ThreadPool, spawned: usize) void {
+ if (builtin.single_threaded) {
+ return;
+ }
+
{
self.mutex.lock();
defer self.mutex.unlock();
diff --git a/src/Zir.zig b/src/Zir.zig
index 370f996fd5..1ca31755f7 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -2427,7 +2427,7 @@ pub const Inst = struct {
operand: Ref,
pub fn src(self: @This()) LazySrcLoc {
- return .{ .node_offset = self.src_node };
+ return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// Used for unary operators, with a token source location.
@@ -2450,7 +2450,7 @@ pub const Inst = struct {
payload_index: u32,
pub fn src(self: @This()) LazySrcLoc {
- return .{ .node_offset = self.src_node };
+ return LazySrcLoc.nodeOffset(self.src_node);
}
},
pl_tok: struct {
@@ -2526,7 +2526,7 @@ pub const Inst = struct {
bit_count: u16,
pub fn src(self: @This()) LazySrcLoc {
- return .{ .node_offset = self.src_node };
+ return LazySrcLoc.nodeOffset(self.src_node);
}
},
bool_br: struct {
@@ -2545,7 +2545,7 @@ pub const Inst = struct {
force_comptime: bool,
pub fn src(self: @This()) LazySrcLoc {
- return .{ .node_offset = self.src_node };
+ return LazySrcLoc.nodeOffset(self.src_node);
}
},
@"break": struct {
@@ -2566,7 +2566,7 @@ pub const Inst = struct {
inst: Index,
pub fn src(self: @This()) LazySrcLoc {
- return .{ .node_offset = self.src_node };
+ return LazySrcLoc.nodeOffset(self.src_node);
}
},
str_op: struct {
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index da4e84b587..33cf5422f2 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -622,7 +622,7 @@ pub fn deinit(self: *Self) void {
/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
fn fail(self: *Self, comptime fmt: []const u8, args: anytype) InnerError {
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(self.decl);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
return error.CodegenFail;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 4c2239b306..45191f3107 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -363,7 +363,7 @@ pub const DeclGen = struct {
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(dg.decl);
dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args);
return error.AnalysisFail;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index bda81711fb..19a6917be4 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2163,7 +2163,7 @@ pub const DeclGen = struct {
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
assert(self.err_msg == null);
- const src_loc = @as(LazySrcLoc, .{ .node_offset = 0 }).toSrcLoc(self.decl);
+ const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args);
return error.CodegenFail;
}
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 0dc39db134..9879bc7f35 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -184,7 +184,7 @@ pub const DeclGen = struct {
fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(self.decl);
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
@@ -193,7 +193,7 @@ pub const DeclGen = struct {
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
- const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(self.decl);
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "TODO (SPIR-V): " ++ format, args);
diff --git a/src/config.zig.in b/src/config.zig.in
index f193fddb20..104c3ed8eb 100644
--- a/src/config.zig.in
+++ b/src/config.zig.in
@@ -8,6 +8,7 @@ pub const semver = @import("std").SemanticVersion.parse(version) catch unreachab
pub const enable_logging: bool = @ZIG_ENABLE_LOGGING_BOOL@;
pub const enable_link_snapshots: bool = false;
pub const enable_tracy = false;
+pub const value_tracing = false;
pub const is_stage1 = true;
pub const skip_non_native = false;
pub const omit_stage2: bool = @ZIG_OMIT_STAGE2_BOOL@;
diff --git a/src/link.zig b/src/link.zig
index 60baa6a92a..51712db40e 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -113,7 +113,6 @@ pub const Options = struct {
z_defs: bool,
z_origin: bool,
z_nocopyreloc: bool,
- z_noexecstack: bool,
z_now: bool,
z_relro: bool,
tsaware: bool,
@@ -798,9 +797,8 @@ pub const File = struct {
} else {
try base.flushModule(comp, prog_node);
}
- break :blk try fs.path.join(arena, &.{
- fs.path.dirname(full_out_path_z).?, base.intermediary_basename.?,
- });
+ const dirname = fs.path.dirname(full_out_path_z) orelse ".";
+ break :blk try fs.path.join(arena, &.{ dirname, base.intermediary_basename.? });
} else null;
log.debug("module_obj_path={s}", .{if (module_obj_path) |s| s else "(null)"});
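The link.zig change handles emit paths with no directory component: fs.path.dirname returns null for a bare file name, so the old `.?` unwrap would trip on such paths, and falling back to "." keeps the join well-formed. A small sketch of the behavior relied on here (a standalone test, not compiler code):

const std = @import("std");

test "dirname of a bare file name is null" {
    // No directory component at all: dirname reports null rather than "".
    try std.testing.expect(std.fs.path.dirname("foo.o") == null);

    // The fallback used above keeps the joined path valid.
    const dirname = std.fs.path.dirname("foo.o") orelse ".";
    const joined = try std.fs.path.join(std.testing.allocator, &.{ dirname, "foo.o" });
    defer std.testing.allocator.free(joined);
    try std.testing.expectEqualStrings("." ++ std.fs.path.sep_str ++ "foo.o", joined);
}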
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 30504c7a1a..e0f114acd4 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1333,7 +1333,6 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
man.hash.add(self.base.options.z_defs);
man.hash.add(self.base.options.z_origin);
man.hash.add(self.base.options.z_nocopyreloc);
- man.hash.add(self.base.options.z_noexecstack);
man.hash.add(self.base.options.z_now);
man.hash.add(self.base.options.z_relro);
man.hash.add(self.base.options.hash_style);
@@ -1512,10 +1511,6 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
try argv.append("-z");
try argv.append("nocopyreloc");
}
- if (self.base.options.z_noexecstack) {
- try argv.append("-z");
- try argv.append("noexecstack");
- }
if (self.base.options.z_now) {
// LLD defaults to -zlazy
try argv.append("-znow");
diff --git a/src/main.zig b/src/main.zig
index eaff34ee9e..39237d1625 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -433,7 +433,6 @@ const usage_build_generic =
\\ defs Force a fatal error if any undefined symbols remain
\\ origin Indicate that the object must have its origin processed
\\ nocopyreloc Disable the creation of copy relocations
- \\ noexecstack Indicate that the object requires an executable stack
\\ now (default) Force all relocations to be processed on load
\\ lazy Don't force all relocations to be processed on load
\\ relro (default) Force all relocations to be read-only after processing
@@ -656,7 +655,6 @@ fn buildOutputType(
var linker_z_notext = false;
var linker_z_defs = false;
var linker_z_origin = false;
- var linker_z_noexecstack = false;
var linker_z_now = true;
var linker_z_relro = true;
var linker_tsaware = false;
@@ -1207,8 +1205,6 @@ fn buildOutputType(
linker_z_defs = true;
} else if (mem.eql(u8, z_arg, "origin")) {
linker_z_origin = true;
- } else if (mem.eql(u8, z_arg, "noexecstack")) {
- linker_z_noexecstack = true;
} else if (mem.eql(u8, z_arg, "now")) {
linker_z_now = true;
} else if (mem.eql(u8, z_arg, "lazy")) {
@@ -1694,7 +1690,7 @@ fn buildOutputType(
} else if (mem.eql(u8, z_arg, "origin")) {
linker_z_origin = true;
} else if (mem.eql(u8, z_arg, "noexecstack")) {
- linker_z_noexecstack = true;
+ // noexecstack is the default when linking with LLD
} else if (mem.eql(u8, z_arg, "now")) {
linker_z_now = true;
} else if (mem.eql(u8, z_arg, "lazy")) {
@@ -2719,7 +2715,6 @@ fn buildOutputType(
.linker_z_notext = linker_z_notext,
.linker_z_defs = linker_z_defs,
.linker_z_origin = linker_z_origin,
- .linker_z_noexecstack = linker_z_noexecstack,
.linker_z_now = linker_z_now,
.linker_z_relro = linker_z_relro,
.linker_tsaware = linker_tsaware,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 3257a3cb58..1760c617ce 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -497,7 +497,7 @@ const Writer = struct {
.wasm_memory_size,
=> {
const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = inst_data.node };
+ const src = LazySrcLoc.nodeOffset(inst_data.node);
try self.writeInstRef(stream, inst_data.operand);
try stream.writeAll(")) ");
try self.writeSrc(stream, src);
@@ -510,7 +510,7 @@ const Writer = struct {
.prefetch,
=> {
const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = inst_data.node };
+ const src = LazySrcLoc.nodeOffset(inst_data.node);
try self.writeInstRef(stream, inst_data.lhs);
try stream.writeAll(", ");
try self.writeInstRef(stream, inst_data.rhs);
@@ -520,7 +520,7 @@ const Writer = struct {
.field_call_bind_named => {
const extra = self.code.extraData(Zir.Inst.FieldNamedNode, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
try self.writeInstRef(stream, extra.lhs);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.field_name);
@@ -531,7 +531,7 @@ const Writer = struct {
}
fn writeExtNode(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
try stream.writeAll(")) ");
try self.writeSrc(stream, src);
}
@@ -1050,7 +1050,7 @@ const Writer = struct {
fn writeNodeMultiOp(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const operands = self.code.refSlice(extra.end, extended.small);
for (operands) |operand, i| {
@@ -1074,7 +1074,7 @@ const Writer = struct {
fn writeAsm(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.Asm, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const outputs_len = @truncate(u5, extended.small);
const inputs_len = @truncate(u5, extended.small >> 5);
const clobbers_len = @truncate(u5, extended.small >> 10);
@@ -1145,7 +1145,7 @@ const Writer = struct {
fn writeOverflowArithmetic(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
+ const src = LazySrcLoc.nodeOffset(extra.node);
try self.writeInstRef(stream, extra.lhs);
try stream.writeAll(", ");
@@ -1898,7 +1898,7 @@ const Writer = struct {
inst: Zir.Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const src_node = self.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
+ const src = LazySrcLoc.nodeOffset(src_node);
try stream.writeAll(") ");
try self.writeSrc(stream, src);
}
@@ -2117,7 +2117,7 @@ const Writer = struct {
fn writeAllocExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const src = LazySrcLoc.nodeOffset(extra.data.src_node);
var extra_index: usize = extra.end;
const type_inst: Zir.Inst.Ref = if (!small.has_type) .none else blk: {
@@ -2351,7 +2351,7 @@ const Writer = struct {
fn writeSrcNode(self: *Writer, stream: anytype, src_node: ?i32) !void {
const node_offset = src_node orelse return;
- const src: LazySrcLoc = .{ .node_offset = node_offset };
+ const src = LazySrcLoc.nodeOffset(node_offset);
try stream.writeAll(" ");
return self.writeSrc(stream, src);
}
diff --git a/src/test.zig b/src/test.zig
index 2a9b82027f..9e8e3ccb95 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -61,10 +61,12 @@ const ErrorMsg = union(enum) {
// this is a workaround for stage1 compiler bug I ran into when making it ?u32
column: u32,
kind: Kind,
+ count: u32,
},
plain: struct {
msg: []const u8,
kind: Kind,
+ count: u32,
},
const Kind = enum {
@@ -81,12 +83,14 @@ const ErrorMsg = union(enum) {
.line = @intCast(u32, src.line),
.column = @intCast(u32, src.column),
.kind = kind,
+ .count = src.count,
},
},
.plain => |plain| return .{
.plain = .{
.msg = plain.msg,
.kind = kind,
+ .count = plain.count,
},
},
}
@@ -118,10 +122,16 @@ const ErrorMsg = union(enum) {
try writer.writeAll("?: ");
}
}
- return writer.print("{s}: {s}", .{ @tagName(src.kind), src.msg });
+ try writer.print("{s}: {s}", .{ @tagName(src.kind), src.msg });
+ if (src.count != 1) {
+ try writer.print(" ({d} times)", .{src.count});
+ }
},
.plain => |plain| {
- return writer.print("{s}: {s}", .{ @tagName(plain.kind), plain.msg });
+ try writer.print("{s}: {s}", .{ @tagName(plain.kind), plain.msg });
+ if (plain.count != 1) {
+ try writer.print(" ({d} times)", .{plain.count});
+ }
},
}
}
@@ -647,12 +657,20 @@ pub const TestContext = struct {
for (errors) |err_msg_line, i| {
if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
array[i] = .{
- .plain = .{ .msg = err_msg_line["error: ".len..], .kind = .@"error" },
+ .plain = .{
+ .msg = err_msg_line["error: ".len..],
+ .kind = .@"error",
+ .count = 1,
+ },
};
continue;
} else if (std.mem.startsWith(u8, err_msg_line, "note: ")) {
array[i] = .{
- .plain = .{ .msg = err_msg_line["note: ".len..], .kind = .note },
+ .plain = .{
+ .msg = err_msg_line["note: ".len..],
+ .kind = .note,
+ .count = 1,
+ },
};
continue;
}
@@ -662,7 +680,7 @@ pub const TestContext = struct {
const line_text = it.next() orelse @panic("missing line");
const col_text = it.next() orelse @panic("missing column");
const kind_text = it.next() orelse @panic("missing 'error'/'note'");
- const msg = it.rest()[1..]; // skip over the space at end of "error: "
+ var msg = it.rest()[1..]; // skip over the space at end of "error: "
const line: ?u32 = if (std.mem.eql(u8, line_text, "?"))
null
@@ -695,6 +713,14 @@ pub const TestContext = struct {
break :blk n - 1;
} else std.math.maxInt(u32);
+ const suffix = " times)";
+ const count = if (std.mem.endsWith(u8, msg, suffix)) count: {
+ const lparen = std.mem.lastIndexOfScalar(u8, msg, '(').?;
+ const count = std.fmt.parseInt(u32, msg[lparen + 1 .. msg.len - suffix.len], 10) catch @panic("bad error note count number");
+ msg = msg[0 .. lparen - 1];
+ break :count count;
+ } else 1;
+
array[i] = .{
.src = .{
.src_path = src_path,
@@ -702,6 +728,7 @@ pub const TestContext = struct {
.line = line_0based,
.column = column_0based,
.kind = kind,
+ .count = count,
},
};
}
@@ -1606,7 +1633,8 @@ pub const TestContext = struct {
(case_msg.src.column == std.math.maxInt(u32) or
actual_msg.column == case_msg.src.column) and
std.mem.eql(u8, expected_msg, actual_msg.msg) and
- case_msg.src.kind == .@"error")
+ case_msg.src.kind == .@"error" and
+ actual_msg.count == case_msg.src.count)
{
handled_errors[i] = true;
break;
@@ -1616,7 +1644,8 @@ pub const TestContext = struct {
if (ex_tag != .plain) continue;
if (std.mem.eql(u8, case_msg.plain.msg, plain.msg) and
- case_msg.plain.kind == .@"error")
+ case_msg.plain.kind == .@"error" and
+ case_msg.plain.count == plain.count)
{
handled_errors[i] = true;
break;
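test.zig's expected-error parser now recognizes an optional "(N times)" suffix and checks it against the collapsed count. A standalone sketch of that suffix handling (hypothetical parseCount helper, not part of test.zig):

const std = @import("std");

fn parseCount(msg_in: []const u8) struct { msg: []const u8, count: u32 } {
    const suffix = " times)";
    var msg = msg_in;
    var count: u32 = 1;
    if (std.mem.endsWith(u8, msg, suffix)) {
        const lparen = std.mem.lastIndexOfScalar(u8, msg, '(').?;
        count = std.fmt.parseInt(u32, msg[lparen + 1 .. msg.len - suffix.len], 10) catch
            @panic("bad count");
        msg = msg[0 .. lparen - 1]; // also drop the space before the '('
    }
    return .{ .msg = msg, .count = count };
}

test "expected error strings may carry a count suffix" {
    const parsed = parseCount("note: called from here (2 times)");
    try std.testing.expectEqualStrings("note: called from here", parsed.msg);
    try std.testing.expectEqual(@as(u32, 2), parsed.count);
}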