author     Andrew Kelley <andrew@ziglang.org>  2021-04-24 10:44:41 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2021-04-24 10:44:41 -0700
commit     e86cee258cb0eefca14a94f6b3abb39e8a5f2ef9 (patch)
tree       6d9aa3b21685b1581787246f953db94cdb486693 /src
parent     224fbb23c44628b215662c6199dff11cc2851f04 (diff)
parent     8530b6b7242ebf43b5cb4ae3a2644593f4961a5e (diff)
Merge remote-tracking branch 'origin/master' into stage2-whole-file-astgen
In particular I wanted the change that makes `suspend;` illegal in the parser.
Diffstat (limited to 'src')
 src/Compilation.zig              |  34
 src/codegen.zig                  |   1
 src/link/MachO.zig               |   3
 src/link/MachO/Archive.zig       |   5
 src/link/MachO/Object.zig        |  47
 src/link/MachO/Symbol.zig        |   2
 src/link/MachO/Zld.zig           | 224
 src/link/MachO/reloc/aarch64.zig |   4
 src/main.zig                     |   6
 src/register_manager.zig         | 111
 src/stage1/bigfloat.cpp          |   9
 src/stage1/bigint.cpp            |   4
 src/stage1/bigint.hpp            |   2
 src/stage1/codegen.cpp           |   5
 src/stage1/ir.cpp                |  33
 src/stage1/parser.cpp            |   5
 src/stage1/softfloat_ext.cpp     |  38
 src/stage1/softfloat_ext.hpp     |   3
 src/translate_c.zig              |  16
 src/translate_c/ast.zig          |   6
20 files changed, 377 insertions(+), 181 deletions(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index cb71bb8e0b..be488a20d6 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2856,25 +2856,29 @@ pub fn addCCArgs(
try argv.append("-fPIC");
}
},
- .shared_library, .assembly, .ll, .bc, .unknown, .static_library, .object, .zig => {},
+ .shared_library, .ll, .bc, .unknown, .static_library, .object, .zig => {},
+ .assembly => {
+ // Argh, why doesn't the assembler accept the list of CPU features?!
+ // I don't see a way to do this other than hard coding everything.
+ switch (target.cpu.arch) {
+ .riscv32, .riscv64 => {
+ if (std.Target.riscv.featureSetHas(target.cpu.features, .relax)) {
+ try argv.append("-mrelax");
+ } else {
+ try argv.append("-mno-relax");
+ }
+ },
+ else => {
+ // TODO
+ },
+ }
+ if (target.cpu.model.llvm_name) |ln|
+ try argv.append(try std.fmt.allocPrint(arena, "-mcpu={s}", .{ln}));
+ },
}
if (out_dep_path) |p| {
try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p });
}
- // Argh, why doesn't the assembler accept the list of CPU features?!
- // I don't see a way to do this other than hard coding everything.
- switch (target.cpu.arch) {
- .riscv32, .riscv64 => {
- if (std.Target.riscv.featureSetHas(target.cpu.features, .relax)) {
- try argv.append("-mrelax");
- } else {
- try argv.append("-mno-relax");
- }
- },
- else => {
- // TODO
- },
- }
if (target.os.tag == .freestanding) {
try argv.append("-ffreestanding");
diff --git a/src/codegen.zig b/src/codegen.zig
index b8e1524a28..40b383c24f 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1247,7 +1247,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.stack_offset => |off| {
log.debug("reusing stack offset {} => {*}", .{ off, inst });
- return true;
},
else => return false,
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 9a94b90137..aaf88ad815 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -645,8 +645,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
break :blk true;
}
- if (self.base.options.link_libcpp or
- self.base.options.output_mode == .Lib or
+ if (self.base.options.output_mode == .Lib or
self.base.options.linker_script != null)
{
// Fallback to LLD in this handful of cases on x86_64 only.
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index 86e160ba4d..5a0b9609ad 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -208,14 +208,13 @@ pub fn parseObject(self: Archive, offset: u32) !Object {
const object_name = try parseName(self.allocator, object_header, reader);
defer self.allocator.free(object_name);
- const object_basename = std.fs.path.basename(object_name);
- log.debug("extracting object '{s}' from archive '{s}'", .{ object_basename, self.name.? });
+ log.debug("extracting object '{s}' from archive '{s}'", .{ object_name, self.name.? });
const name = name: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const path = try std.os.realpath(self.name.?, &buffer);
- break :name try std.fmt.allocPrint(self.allocator, "{s}({s})", .{ path, object_basename });
+ break :name try std.fmt.allocPrint(self.allocator, "{s}({s})", .{ path, object_name });
};
var object = Object.init(self.allocator);
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 7b5f23756a..6703a5bfb7 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -32,7 +32,9 @@ symtab_cmd_index: ?u16 = null,
dysymtab_cmd_index: ?u16 = null,
build_version_cmd_index: ?u16 = null,
data_in_code_cmd_index: ?u16 = null,
+
text_section_index: ?u16 = null,
+mod_init_func_section_index: ?u16 = null,
// __DWARF segment sections
dwarf_debug_info_index: ?u16 = null,
@@ -49,6 +51,7 @@ stabs: std.ArrayListUnmanaged(Stab) = .{},
tu_path: ?[]const u8 = null,
tu_mtime: ?u64 = null,
+initializers: std.ArrayListUnmanaged(CppStatic) = .{},
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
pub const Section = struct {
@@ -68,6 +71,11 @@ pub const Section = struct {
}
};
+const CppStatic = struct {
+ symbol: u32,
+ target_addr: u64,
+};
+
const Stab = struct {
tag: Tag,
symbol: u32,
@@ -170,6 +178,7 @@ pub fn deinit(self: *Object) void {
self.strtab.deinit(self.allocator);
self.stabs.deinit(self.allocator);
self.data_in_code_entries.deinit(self.allocator);
+ self.initializers.deinit(self.allocator);
if (self.name) |n| {
self.allocator.free(n);
@@ -216,6 +225,7 @@ pub fn parse(self: *Object) !void {
try self.parseSections();
if (self.symtab_cmd_index != null) try self.parseSymtab();
if (self.data_in_code_cmd_index != null) try self.readDataInCode();
+ try self.parseInitializers();
try self.parseDebugInfo();
}
@@ -250,6 +260,10 @@ pub fn readLoadCommands(self: *Object, reader: anytype) !void {
if (mem.eql(u8, sectname, "__text")) {
self.text_section_index = index;
}
+ } else if (mem.eql(u8, segname, "__DATA")) {
+ if (mem.eql(u8, sectname, "__mod_init_func")) {
+ self.mod_init_func_section_index = index;
+ }
}
sect.offset += offset;
@@ -298,28 +312,53 @@ pub fn parseSections(self: *Object) !void {
var section = Section{
.inner = sect,
.code = code,
- .relocs = undefined,
+ .relocs = null,
};
// Parse relocations
- section.relocs = if (sect.nreloc > 0) relocs: {
+ if (sect.nreloc > 0) {
var raw_relocs = try self.allocator.alloc(u8, @sizeOf(macho.relocation_info) * sect.nreloc);
defer self.allocator.free(raw_relocs);
_ = try self.file.?.preadAll(raw_relocs, sect.reloff);
- break :relocs try reloc.parse(
+ section.relocs = try reloc.parse(
self.allocator,
self.arch.?,
section.code,
mem.bytesAsSlice(macho.relocation_info, raw_relocs),
);
- } else null;
+ }
self.sections.appendAssumeCapacity(section);
}
}
+pub fn parseInitializers(self: *Object) !void {
+ const index = self.mod_init_func_section_index orelse return;
+ const section = self.sections.items[index];
+
+ log.debug("parsing initializers in {s}", .{self.name.?});
+
+ // Parse C++ initializers
+ const relocs = section.relocs orelse unreachable;
+ try self.initializers.ensureCapacity(self.allocator, relocs.len);
+ for (relocs) |rel| {
+ self.initializers.appendAssumeCapacity(.{
+ .symbol = rel.target.symbol,
+ .target_addr = undefined,
+ });
+ }
+
+ mem.reverse(CppStatic, self.initializers.items);
+
+ for (self.initializers.items) |initializer| {
+ const sym = self.symtab.items[initializer.symbol];
+ const sym_name = self.getString(sym.n_strx);
+ log.debug(" | {s}", .{sym_name});
+ }
+}
+
pub fn parseSymtab(self: *Object) !void {
const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
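
Note on parseInitializers above: __DATA,__mod_init_func holds an array of pointers to functions that dyld invokes at load time (C++ static constructors end up here). Each relocation in the section names one initializer symbol; a hedged restatement of the shape being recovered:

    // One entry per relocation in __mod_init_func; target_addr is resolved
    // later by Zld.allocateCppStatics and written back out in Zld.flush.
    const CppStatic = struct {
        symbol: u32,      // index into the object's symtab
        target_addr: u64, // virtual address of the initializer function
    };
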
diff --git a/src/link/MachO/Symbol.zig b/src/link/MachO/Symbol.zig
index f65a694f75..9e6c2bf68a 100644
--- a/src/link/MachO/Symbol.zig
+++ b/src/link/MachO/Symbol.zig
@@ -52,7 +52,7 @@ pub fn isUndf(sym: macho.nlist_64) bool {
}
pub fn isWeakDef(sym: macho.nlist_64) bool {
- return sym.n_desc == macho.N_WEAK_DEF;
+ return (sym.n_desc & macho.N_WEAK_DEF) != 0;
}
/// Symbol is local if it is defined and not an extern.
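
Note on the isWeakDef fix above: n_desc is a bitfield, so a flag must be mask-tested; the old equality check missed weak symbols that also carry other n_desc bits. A small self-contained check (flag values from <mach-o/nlist.h>):

    const std = @import("std");

    test "N_WEAK_DEF must be mask-tested, not compared" {
        const N_WEAK_DEF: u16 = 0x0080;
        const REFERENCED_DYNAMICALLY: u16 = 0x0010;
        // A weak definition that also carries another n_desc flag:
        const n_desc: u16 = N_WEAK_DEF | REFERENCED_DYNAMICALLY;
        try std.testing.expect(n_desc != N_WEAK_DEF); // old check: misses it
        try std.testing.expect((n_desc & N_WEAK_DEF) != 0); // new check: finds it
    }
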
diff --git a/src/link/MachO/Zld.zig b/src/link/MachO/Zld.zig
index ae475ab30d..a585b1fd1e 100644
--- a/src/link/MachO/Zld.zig
+++ b/src/link/MachO/Zld.zig
@@ -72,6 +72,7 @@ tlv_bss_section_index: ?u16 = null,
la_symbol_ptr_section_index: ?u16 = null,
data_section_index: ?u16 = null,
bss_section_index: ?u16 = null,
+common_section_index: ?u16 = null,
symtab: std.StringArrayHashMapUnmanaged(Symbol) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
@@ -224,6 +225,7 @@ pub fn link(self: *Zld, files: []const []const u8, out_path: []const u8) !void {
self.allocateLinkeditSegment();
try self.allocateSymbols();
try self.allocateStubsAndGotEntries();
+ try self.allocateCppStatics();
try self.writeStubHelperCommon();
try self.resolveRelocsAndWriteSections();
try self.flush();
@@ -465,23 +467,43 @@ fn updateMetadata(self: *Zld) !void {
},
macho.S_ZEROFILL => {
if (!mem.eql(u8, segname, "__DATA")) continue;
- if (self.bss_section_index != null) continue;
+ if (mem.eql(u8, sectname, "__common")) {
+ if (self.common_section_index != null) continue;
- self.bss_section_index = @intCast(u16, data_seg.sections.items.len);
- try data_seg.addSection(self.allocator, .{
- .sectname = makeStaticString("__bss"),
- .segname = makeStaticString("__DATA"),
- .addr = 0,
- .size = 0,
- .offset = 0,
- .@"align" = 0,
- .reloff = 0,
- .nreloc = 0,
- .flags = macho.S_ZEROFILL,
- .reserved1 = 0,
- .reserved2 = 0,
- .reserved3 = 0,
- });
+ self.common_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, .{
+ .sectname = makeStaticString("__common"),
+ .segname = makeStaticString("__DATA"),
+ .addr = 0,
+ .size = 0,
+ .offset = 0,
+ .@"align" = 0,
+ .reloff = 0,
+ .nreloc = 0,
+ .flags = macho.S_ZEROFILL,
+ .reserved1 = 0,
+ .reserved2 = 0,
+ .reserved3 = 0,
+ });
+ } else {
+ if (self.bss_section_index != null) continue;
+
+ self.bss_section_index = @intCast(u16, data_seg.sections.items.len);
+ try data_seg.addSection(self.allocator, .{
+ .sectname = makeStaticString("__bss"),
+ .segname = makeStaticString("__DATA"),
+ .addr = 0,
+ .size = 0,
+ .offset = 0,
+ .@"align" = 0,
+ .reloff = 0,
+ .nreloc = 0,
+ .flags = macho.S_ZEROFILL,
+ .reserved1 = 0,
+ .reserved2 = 0,
+ .reserved3 = 0,
+ });
+ }
},
macho.S_THREAD_LOCAL_VARIABLES => {
if (!mem.eql(u8, segname, "__DATA")) continue;
@@ -568,7 +590,9 @@ fn updateMetadata(self: *Zld) !void {
const segname = parseName(&source_sect.segname);
const sectname = parseName(&source_sect.sectname);
+
log.debug("section '{s}/{s}' will be unmapped", .{ segname, sectname });
+
try self.unhandled_sections.putNoClobber(self.allocator, .{
.object_id = object_id,
.source_sect_id = source_sect_id,
@@ -585,6 +609,7 @@ const MatchingSection = struct {
fn getMatchingSection(self: *Zld, section: macho.section_64) ?MatchingSection {
const segname = parseName(&section.segname);
const sectname = parseName(&section.sectname);
+
const res: ?MatchingSection = blk: {
switch (section.flags) {
macho.S_4BYTE_LITERALS, macho.S_8BYTE_LITERALS, macho.S_16BYTE_LITERALS => {
@@ -612,6 +637,12 @@ fn getMatchingSection(self: *Zld, section: macho.section_64) ?MatchingSection {
};
},
macho.S_ZEROFILL => {
+ if (mem.eql(u8, sectname, "__common")) {
+ break :blk .{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.common_section_index.?,
+ };
+ }
break :blk .{
.seg = self.data_segment_cmd_index.?,
.sect = self.bss_section_index.?,
@@ -667,6 +698,7 @@ fn getMatchingSection(self: *Zld, section: macho.section_64) ?MatchingSection {
},
}
};
+
return res;
}
@@ -737,11 +769,12 @@ fn sortSections(self: *Zld) !void {
// __DATA segment
const indices = &[_]*?u16{
&self.la_symbol_ptr_section_index,
- &self.tlv_section_index,
&self.data_section_index,
+ &self.tlv_section_index,
&self.tlv_data_section_index,
&self.tlv_bss_section_index,
&self.bss_section_index,
+ &self.common_section_index,
};
for (indices) |maybe_index| {
const new_index: u16 = if (maybe_index.*) |index| blk: {
@@ -959,6 +992,21 @@ fn allocateStubsAndGotEntries(self: *Zld) !void {
}
}
+fn allocateCppStatics(self: *Zld) !void {
+ for (self.objects.items) |*object| {
+ for (object.initializers.items) |*initializer| {
+ const sym = object.symtab.items[initializer.symbol];
+ const sym_name = object.getString(sym.n_strx);
+ initializer.target_addr = object.locals.get(sym_name).?.address;
+
+ log.debug("resolving C++ initializer '{s}' at 0x{x}", .{
+ sym_name,
+ initializer.target_addr,
+ });
+ }
+ }
+}
+
fn writeStubHelperCommon(self: *Zld) !void {
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const stub_helper = &text_segment.sections.items[self.stub_helper_section_index.?];
@@ -1236,11 +1284,12 @@ fn resolveSymbolsInObject(self: *Zld, object_id: u16) !void {
continue;
} else if (Symbol.isGlobal(sym)) {
const sym_name = object.getString(sym.n_strx);
+ const is_weak = Symbol.isWeakDef(sym) or Symbol.isPext(sym);
const global = self.symtab.getEntry(sym_name) orelse {
// Put new global symbol into the symbol table.
const name = try self.allocator.dupe(u8, sym_name);
try self.symtab.putNoClobber(self.allocator, name, .{
- .tag = if (Symbol.isWeakDef(sym)) .weak else .strong,
+ .tag = if (is_weak) .weak else .strong,
.name = name,
.address = 0,
.section = 0,
@@ -1251,15 +1300,20 @@ fn resolveSymbolsInObject(self: *Zld, object_id: u16) !void {
};
switch (global.value.tag) {
- .weak => continue, // If symbol is weak, nothing to do.
+ .weak => {
+ if (is_weak) continue; // Nothing to do for weak symbol.
+ },
.strong => {
- log.err("symbol '{s}' defined multiple times", .{sym_name});
- return error.MultipleSymbolDefinitions;
+ if (!is_weak) {
+ log.debug("strong symbol '{s}' defined multiple times", .{sym_name});
+ return error.MultipleSymbolDefinitions;
+ }
+ continue;
},
else => {},
}
- global.value.tag = .strong;
+ global.value.tag = if (is_weak) .weak else .strong;
global.value.file = object_id;
global.value.index = @intCast(u32, sym_id);
} else if (Symbol.isUndef(sym)) {
@@ -1340,6 +1394,21 @@ fn resolveSymbols(self: *Zld) !void {
.section = 0,
.file = 0,
});
+
+ {
+ log.debug("symtab", .{});
+ for (self.symtab.items()) |sym| {
+ switch (sym.value.tag) {
+ .weak, .strong => {
+ log.debug(" | {s} => {s}", .{ sym.key, self.objects.items[sym.value.file.?].name.? });
+ },
+ .import => {
+ log.debug(" | {s} => libSystem.B.dylib", .{sym.key});
+ },
+ else => unreachable,
+ }
+ }
+ }
}
fn resolveStubsAndGotEntries(self: *Zld) !void {
@@ -1412,9 +1481,14 @@ fn resolveRelocsAndWriteSections(self: *Zld) !void {
log.debug("relocating object {s}", .{object.name});
for (object.sections.items) |sect, source_sect_id| {
+ if (sect.inner.flags == macho.S_MOD_INIT_FUNC_POINTERS or
+ sect.inner.flags == macho.S_MOD_TERM_FUNC_POINTERS) continue;
+
const segname = parseName(&sect.inner.segname);
const sectname = parseName(&sect.inner.sectname);
+ log.debug("relocating section '{s},{s}'", .{ segname, sectname });
+
// Get mapping
const target_mapping = self.mappings.get(.{
.object_id = @intCast(u16, object_id),
@@ -1532,6 +1606,7 @@ fn resolveRelocsAndWriteSections(self: *Zld) !void {
target_sect_off,
target_sect_off + sect.code.len,
});
+
// Zero-out the space
var zeroes = try self.allocator.alloc(u8, sect.code.len);
defer self.allocator.free(zeroes);
@@ -1571,25 +1646,33 @@ fn relocTargetAddr(self: *Zld, object_id: u16, target: reloc.Relocation.Target)
const target_sect = target_seg.sections.items[target_mapping.target_sect_id];
const target_addr = target_sect.addr + target_mapping.offset;
break :blk sym.n_value - source_sect.addr + target_addr;
- } else {
- if (self.stubs.get(sym_name)) |index| {
- log.debug(" | symbol stub '{s}'", .{sym_name});
- const segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
- const stubs = segment.sections.items[self.stubs_section_index.?];
- break :blk stubs.addr + index * stubs.reserved2;
- } else if (mem.eql(u8, sym_name, "__tlv_bootstrap")) {
- log.debug(" | symbol '__tlv_bootstrap'", .{});
- const segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
- const tlv = segment.sections.items[self.tlv_section_index.?];
- break :blk tlv.addr;
- } else {
- const global = self.symtab.get(sym_name) orelse {
- log.err("failed to resolve symbol '{s}' as a relocation target", .{sym_name});
- return error.FailedToResolveRelocationTarget;
- };
- log.debug(" | global symbol '{s}'", .{sym_name});
- break :blk global.address;
+ } else if (self.symtab.get(sym_name)) |global| {
+ switch (global.tag) {
+ .weak, .strong => {
+ log.debug(" | global symbol '{s}'", .{sym_name});
+ break :blk global.address;
+ },
+ .import => {
+ if (self.stubs.get(sym_name)) |index| {
+ log.debug(" | symbol stub '{s}'", .{sym_name});
+ const segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
+ const stubs = segment.sections.items[self.stubs_section_index.?];
+ break :blk stubs.addr + index * stubs.reserved2;
+ } else if (mem.eql(u8, sym_name, "__tlv_bootstrap")) {
+ log.debug(" | symbol '__tlv_bootstrap'", .{});
+ const segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
+ const tlv = segment.sections.items[self.tlv_section_index.?];
+ break :blk tlv.addr;
+ } else {
+ log.err("failed to resolve symbol '{s}' as a relocation target", .{sym_name});
+ return error.FailedToResolveRelocationTarget;
+ }
+ },
+ else => unreachable,
}
+ } else {
+ log.err("failed to resolve symbol '{s}' as a relocation target", .{sym_name});
+ return error.FailedToResolveRelocationTarget;
}
},
.section => |sect_id| {
@@ -2008,6 +2091,12 @@ fn populateMetadata(self: *Zld) !void {
}
fn flush(self: *Zld) !void {
+ if (self.common_section_index) |index| {
+ const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
+ const sect = &seg.sections.items[index];
+ sect.offset = 0;
+ }
+
if (self.bss_section_index) |index| {
const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const sect = &seg.sections.items[index];
@@ -2040,6 +2129,24 @@ fn flush(self: *Zld) !void {
try self.file.?.pwriteAll(buffer, sect.offset);
}
+ if (self.mod_init_func_section_index) |index| {
+ const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
+ const sect = &seg.sections.items[index];
+
+ var initializers = std.ArrayList(u64).init(self.allocator);
+ defer initializers.deinit();
+
+ // TODO sort the initializers globally
+ for (self.objects.items) |object| {
+ for (object.initializers.items) |initializer| {
+ try initializers.append(initializer.target_addr);
+ }
+ }
+
+ _ = try self.file.?.pwriteAll(mem.sliceAsBytes(initializers.items), sect.offset);
+ sect.size = @intCast(u32, initializers.items.len * @sizeOf(u64));
+ }
+
try self.writeGotEntries();
try self.setEntryPoint();
try self.writeRebaseInfoTable();
@@ -2139,35 +2246,18 @@ fn writeRebaseInfoTable(self: *Zld) !void {
// TODO audit and investigate this.
const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
const sect = seg.sections.items[idx];
- const npointers = sect.size * @sizeOf(u64);
const base_offset = sect.addr - seg.inner.vmaddr;
const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);
- try pointers.ensureCapacity(pointers.items.len + npointers);
- var i: usize = 0;
- while (i < npointers) : (i += 1) {
- pointers.appendAssumeCapacity(.{
- .offset = base_offset + i * @sizeOf(u64),
- .segment_id = segment_id,
- });
- }
- }
-
- if (self.mod_term_func_section_index) |idx| {
- // TODO audit and investigate this.
- const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
- const sect = seg.sections.items[idx];
- const npointers = sect.size * @sizeOf(u64);
- const base_offset = sect.addr - seg.inner.vmaddr;
- const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);
-
- try pointers.ensureCapacity(pointers.items.len + npointers);
- var i: usize = 0;
- while (i < npointers) : (i += 1) {
- pointers.appendAssumeCapacity(.{
- .offset = base_offset + i * @sizeOf(u64),
- .segment_id = segment_id,
- });
+ var index: u64 = 0;
+ for (self.objects.items) |object| {
+ for (object.initializers.items) |_| {
+ try pointers.append(.{
+ .offset = base_offset + index * @sizeOf(u64),
+ .segment_id = segment_id,
+ });
+ index += 1;
+ }
}
}
@@ -2447,7 +2537,7 @@ fn writeDebugInfo(self: *Zld) !void {
.n_type = macho.N_OSO,
.n_sect = 0,
.n_desc = 1,
- .n_value = tu_mtime,
+ .n_value = 0, //tu_mtime, TODO figure out why precalculated mtime value doesn't work
});
for (object.stabs.items) |stab| {
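
Note on the __mod_init_func support above: flush writes one absolute 64-bit pointer per initializer, so writeRebaseInfoTable must emit a rebase entry per slot (the removed code derived npointers by multiplying sect.size by @sizeOf(u64), where a division would count pointers — likely what the "TODO audit" comment flagged). A hedged sketch of the per-slot offset math, with hypothetical names:

    // Rebase offsets are relative to the owning segment's vmaddr; slot i of
    // __mod_init_func sits at (sect.addr - seg.vmaddr) + i * 8.
    fn initializerRebaseOffset(sect_addr: u64, seg_vmaddr: u64, slot: u64) u64 {
        return (sect_addr - seg_vmaddr) + slot * @sizeOf(u64);
    }
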
diff --git a/src/link/MachO/reloc/aarch64.zig b/src/link/MachO/reloc/aarch64.zig
index a7dd0919b4..d8e7cebddd 100644
--- a/src/link/MachO/reloc/aarch64.zig
+++ b/src/link/MachO/reloc/aarch64.zig
@@ -226,7 +226,9 @@ pub const Parser = struct {
try parser.parseTlvpLoadPageOff(rel);
},
.ARM64_RELOC_POINTER_TO_GOT => {
- return error.ToDoRelocPointerToGot;
+ // TODO Handle pointer to GOT. This reloc seems to appear in
+ // __LD,__compact_unwind section which we currently don't handle.
+ log.debug("Unhandled relocation ARM64_RELOC_POINTER_TO_GOT", .{});
},
}
}
diff --git a/src/main.zig b/src/main.zig
index 3edabca95f..5e3823abd6 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -355,6 +355,8 @@ const usage_build_generic =
\\ -rpath [path] Add directory to the runtime library search path
\\ -feach-lib-rpath Ensure adding rpath for each used dynamic library
\\ -fno-each-lib-rpath Prevent adding rpath for each used dynamic library
+ \\ -fallow-shlib-undefined Allows undefined symbols in shared libraries
+ \\ -fno-allow-shlib-undefined Disallows undefined symbols in shared libraries
\\ --eh-frame-hdr Enable C++ exception handling by passing --eh-frame-hdr to linker
\\ --emit-relocs Enable output of relocation sections for post build tools
\\ -dynamic Force output to be dynamically linked
@@ -988,6 +990,10 @@ fn buildOutputType(
link_eh_frame_hdr = true;
} else if (mem.eql(u8, arg, "--emit-relocs")) {
link_emit_relocs = true;
+ } else if (mem.eql(u8, arg, "-fallow-shlib-undefined")) {
+ linker_allow_shlib_undefined = true;
+ } else if (mem.eql(u8, arg, "-fno-allow-shlib-undefined")) {
+ linker_allow_shlib_undefined = false;
} else if (mem.eql(u8, arg, "-Bsymbolic")) {
linker_bind_global_refs_locally = true;
} else if (mem.eql(u8, arg, "--verbose-link")) {
diff --git a/src/register_manager.zig b/src/register_manager.zig
index e11f2c3111..01f83aa2f5 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -36,7 +36,7 @@ pub fn RegisterManager(
}
fn isTracked(reg: Register) bool {
- return std.mem.indexOfScalar(Register, callee_preserved_regs, reg) != null;
+ return reg.allocIndex() != null;
}
fn markRegUsed(self: *Self, reg: Register) void {
@@ -55,6 +55,7 @@ pub fn RegisterManager(
self.free_registers |= @as(FreeRegInt, 1) << shift;
}
+ /// Returns true when this register is not tracked
pub fn isRegFree(self: Self, reg: Register) bool {
if (FreeRegInt == u0) return true;
const index = reg.allocIndex() orelse return true;
@@ -63,7 +64,8 @@ pub fn RegisterManager(
}
/// Returns whether this register was allocated in the course
- /// of this function
+ /// of this function.
+ /// Returns false when this register is not tracked
pub fn isRegAllocated(self: Self, reg: Register) bool {
if (FreeRegInt == u0) return false;
const index = reg.allocIndex() orelse return false;
@@ -71,57 +73,89 @@ pub fn RegisterManager(
return self.allocated_registers & @as(FreeRegInt, 1) << shift != 0;
}
- /// Before calling, must ensureCapacity + 1 on self.registers.
+ /// Before calling, must ensureCapacity + count on self.registers.
/// Returns `null` if all registers are allocated.
- pub fn tryAllocReg(self: *Self, inst: *ir.Inst) ?Register {
- const free_index = @ctz(FreeRegInt, self.free_registers);
- if (free_index >= callee_preserved_regs.len) {
+ pub fn tryAllocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ?[count]Register {
+ if (self.tryAllocRegsWithoutTracking(count)) |regs| {
+ for (regs) |reg, i| {
+ self.markRegUsed(reg);
+ self.registers.putAssumeCapacityNoClobber(reg, insts[i]);
+ }
+
+ return regs;
+ } else {
return null;
}
+ }
- // This is necessary because the return type of @ctz is 1
- // bit longer than ShiftInt if callee_preserved_regs.len
- // is a power of two. This int cast is always safe because
- // free_index < callee_preserved_regs.len
- const shift = @intCast(ShiftInt, free_index);
- const mask = @as(FreeRegInt, 1) << shift;
- self.free_registers &= ~mask;
- self.allocated_registers |= mask;
+ /// Before calling, must ensureCapacity + 1 on self.registers.
+ /// Returns `null` if all registers are allocated.
+ pub fn tryAllocReg(self: *Self, inst: *ir.Inst) ?Register {
+ return if (tryAllocRegs(self, 1, .{inst})) |regs| regs[0] else null;
+ }
- const reg = callee_preserved_regs[free_index];
- self.registers.putAssumeCapacityNoClobber(reg, inst);
- log.debug("alloc {} => {*}", .{ reg, inst });
- return reg;
+ /// Before calling, must ensureCapacity + count on self.registers.
+ pub fn allocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ![count]Register {
+ comptime assert(count > 0 and count <= callee_preserved_regs.len);
+
+ return self.tryAllocRegs(count, insts) orelse blk: {
+ // We'll take over the first count registers. Spill
+ // the instructions that were previously there to
+ // stack allocations.
+ var regs: [count]Register = undefined;
+ std.mem.copy(Register, &regs, callee_preserved_regs[0..count]);
+
+ for (regs) |reg, i| {
+ if (self.isRegFree(reg)) {
+ self.markRegUsed(reg);
+ self.registers.putAssumeCapacityNoClobber(reg, insts[i]);
+ } else {
+ const regs_entry = self.registers.getEntry(reg).?;
+ const spilled_inst = regs_entry.value;
+ regs_entry.value = insts[i];
+ try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
+ }
+ }
+
+ break :blk regs;
+ };
}
/// Before calling, must ensureCapacity + 1 on self.registers.
pub fn allocReg(self: *Self, inst: *ir.Inst) !Register {
- return self.tryAllocReg(inst) orelse b: {
- // We'll take over the first register. Move the instruction that was previously
- // there to a stack allocation.
- const reg = callee_preserved_regs[0];
- const regs_entry = self.registers.getEntry(reg).?;
- const spilled_inst = regs_entry.value;
- regs_entry.value = inst;
- try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
+ return (try allocRegs(self, 1, .{inst}))[0];
+ }
- break :b reg;
- };
+ /// Does not track the registers.
+ /// Returns `null` if not enough registers are free.
+ pub fn tryAllocRegsWithoutTracking(self: *Self, comptime count: comptime_int) ?[count]Register {
+ comptime if (callee_preserved_regs.len == 0) return null;
+ comptime assert(count > 0 and count <= callee_preserved_regs.len);
+
+ const free_registers = @popCount(FreeRegInt, self.free_registers);
+ if (free_registers < count) return null;
+
+ var regs: [count]Register = undefined;
+ var i: usize = 0;
+ for (callee_preserved_regs) |reg| {
+ if (i >= count) break;
+ if (self.isRegFree(reg)) {
+ regs[i] = reg;
+ i += 1;
+ }
+ }
+ return regs;
}
/// Does not track the register.
/// Returns `null` if all registers are allocated.
- pub fn findUnusedReg(self: *Self) ?Register {
- const free_index = @ctz(FreeRegInt, self.free_registers);
- if (free_index >= callee_preserved_regs.len) {
- return null;
- }
- return callee_preserved_regs[free_index];
+ pub fn tryAllocRegWithoutTracking(self: *Self) ?Register {
+ return if (tryAllocRegsWithoutTracking(self, 1)) |regs| regs[0] else null;
}
/// Does not track the register.
pub fn allocRegWithoutTracking(self: *Self) !Register {
- return self.findUnusedReg() orelse b: {
+ return self.tryAllocRegWithoutTracking() orelse b: {
// We'll take over the first register. Move the instruction that was previously
// there to a stack allocation.
const reg = callee_preserved_regs[0];
@@ -190,7 +224,10 @@ pub fn RegisterManager(
}
const MockRegister = enum(u2) {
- r0, r1, r2, r3,
+ r0,
+ r1,
+ r2,
+ r3,
pub fn allocIndex(self: MockRegister) ?u2 {
inline for (mock_callee_preserved_regs) |cpreg, i| {
@@ -213,7 +250,7 @@ const MockFunction = struct {
self.register_manager.deinit(self.allocator);
self.spilled.deinit(self.allocator);
}
-
+
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: MockRegister, inst: *ir.Inst) !void {
try self.spilled.append(self.allocator, reg);
}
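
Note on the register manager changes above: single-register allocation is now a thin wrapper over the multi-register variants. A hedged usage sketch for a two-operand instruction (instruction and field names assumed):

    // Allocate both operand registers of a binary op in one call. If fewer than
    // two registers are free, allocRegs spills whatever occupied the first two
    // callee-preserved registers. Per the doc comment, the caller must first
    // ensureCapacity + 2 on self.registers.
    const regs = try self.register_manager.allocRegs(2, .{ lhs_inst, rhs_inst });
    const lhs_reg = regs[0];
    const rhs_reg = regs[1];
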
diff --git a/src/stage1/bigfloat.cpp b/src/stage1/bigfloat.cpp
index 58b0aff54a..840cdccc8b 100644
--- a/src/stage1/bigfloat.cpp
+++ b/src/stage1/bigfloat.cpp
@@ -9,6 +9,7 @@
#include "bigint.hpp"
#include "buffer.hpp"
#include "softfloat.hpp"
+#include "softfloat_ext.hpp"
#include "parse_f128.h"
#include <stdio.h>
#include <math.h>
@@ -60,9 +61,7 @@ void bigfloat_init_bigint(BigFloat *dest, const BigInt *op) {
if (i == 0) {
if (op->is_negative) {
- float128_t zero_f128;
- ui32_to_f128M(0, &zero_f128);
- f128M_sub(&zero_f128, &dest->value, &dest->value);
+ f128M_neg(&dest->value, &dest->value);
}
return;
}
@@ -89,9 +88,7 @@ void bigfloat_add(BigFloat *dest, const BigFloat *op1, const BigFloat *op2) {
}
void bigfloat_negate(BigFloat *dest, const BigFloat *op) {
- float128_t zero_f128;
- ui32_to_f128M(0, &zero_f128);
- f128M_sub(&zero_f128, &op->value, &dest->value);
+ f128M_neg(&op->value, &dest->value);
}
void bigfloat_sub(BigFloat *dest, const BigFloat *op1, const BigFloat *op2) {
diff --git a/src/stage1/bigint.cpp b/src/stage1/bigint.cpp
index 79a05e95a5..acb3e18e41 100644
--- a/src/stage1/bigint.cpp
+++ b/src/stage1/bigint.cpp
@@ -1446,10 +1446,10 @@ void bigint_negate(BigInt *dest, const BigInt *op) {
bigint_normalize(dest);
}
-void bigint_negate_wrap(BigInt *dest, const BigInt *op, size_t bit_count) {
+void bigint_negate_wrap(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed) {
BigInt zero;
bigint_init_unsigned(&zero, 0);
- bigint_sub_wrap(dest, &zero, op, bit_count, true);
+ bigint_sub_wrap(dest, &zero, op, bit_count, is_signed);
}
void bigint_not(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed) {
diff --git a/src/stage1/bigint.hpp b/src/stage1/bigint.hpp
index 044ea66423..aa37b9302a 100644
--- a/src/stage1/bigint.hpp
+++ b/src/stage1/bigint.hpp
@@ -75,7 +75,7 @@ void bigint_shl_trunc(BigInt *dest, const BigInt *op1, const BigInt *op2, size_t
void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2);
void bigint_negate(BigInt *dest, const BigInt *op);
-void bigint_negate_wrap(BigInt *dest, const BigInt *op, size_t bit_count);
+void bigint_negate_wrap(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed);
void bigint_not(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed);
void bigint_truncate(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed);
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 968caaf19b..1f30cd0a85 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -7436,7 +7436,10 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
case ZigTypeIdFloat:
switch (type_entry->data.floating.bit_count) {
case 16:
- return LLVMConstReal(get_llvm_type(g, type_entry), zig_f16_to_double(const_val->data.x_f16));
+ {
+ LLVMValueRef as_int = LLVMConstInt(LLVMInt16Type(), const_val->data.x_f16.v, false);
+ return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));
+ }
case 32:
return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f32);
case 64:
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 71a233c964..c59f63399c 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -9534,7 +9534,7 @@ static IrInstSrc *ir_gen_nosuspend(IrBuilderSrc *irb, Scope *parent_scope, AstNo
Scope *child_scope = create_nosuspend_scope(irb->codegen, node, parent_scope);
// purposefully pass null for result_loc and let EndExpr handle it
- return ir_gen_node_extra(irb, node->data.comptime_expr.expr, child_scope, lval, nullptr);
+ return ir_gen_node_extra(irb, node->data.nosuspend_expr.expr, child_scope, lval, nullptr);
}
static IrInstSrc *ir_gen_return_from_block(IrBuilderSrc *irb, Scope *break_scope, AstNode *node, ScopeBlock *block_scope) {
@@ -10199,14 +10199,12 @@ static IrInstSrc *ir_gen_suspend(IrBuilderSrc *irb, Scope *parent_scope, AstNode
}
IrInstSrcSuspendBegin *begin = ir_build_suspend_begin_src(irb, parent_scope, node);
- if (node->data.suspend.block != nullptr) {
- ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
- Scope *child_scope = &suspend_scope->base;
- IrInstSrc *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
- if (susp_res == irb->codegen->invalid_inst_src)
- return irb->codegen->invalid_inst_src;
- ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
- }
+ ScopeSuspend *suspend_scope = create_suspend_scope(irb->codegen, node, parent_scope);
+ Scope *child_scope = &suspend_scope->base;
+ IrInstSrc *susp_res = ir_gen_node(irb, node->data.suspend.block, child_scope);
+ if (susp_res == irb->codegen->invalid_inst_src)
+ return irb->codegen->invalid_inst_src;
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.suspend.block, susp_res));
return ir_mark_gen(ir_build_suspend_finish_src(irb, parent_scope, node, begin));
}
@@ -11363,11 +11361,8 @@ static void float_negate(ZigValue *out_val, ZigValue *op) {
} else if (op->type->id == ZigTypeIdFloat) {
switch (op->type->data.floating.bit_count) {
case 16:
- {
- const float16_t zero = zig_double_to_f16(0);
- out_val->data.x_f16 = f16_sub(zero, op->data.x_f16);
- return;
- }
+ out_val->data.x_f16 = f16_neg(op->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = -op->data.x_f32;
return;
@@ -11375,9 +11370,7 @@ static void float_negate(ZigValue *out_val, ZigValue *op) {
out_val->data.x_f64 = -op->data.x_f64;
return;
case 128:
- float128_t zero_f128;
- ui32_to_f128M(0, &zero_f128);
- f128M_sub(&zero_f128, &op->data.x_f128, &out_val->data.x_f128);
+ f128M_neg(&op->data.x_f128, &out_val->data.x_f128);
return;
default:
zig_unreachable();
@@ -21665,8 +21658,8 @@ static ErrorMsg *ir_eval_negation_scalar(IrAnalyze *ira, IrInst* source_instr, Z
{
bool is_float = (scalar_type->id == ZigTypeIdFloat || scalar_type->id == ZigTypeIdComptimeFloat);
- bool ok_type = ((scalar_type->id == ZigTypeIdInt && scalar_type->data.integral.is_signed) ||
- scalar_type->id == ZigTypeIdComptimeInt || (is_float && !is_wrap_op));
+ bool ok_type = scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdComptimeInt ||
+ (is_float && !is_wrap_op);
if (!ok_type) {
const char *fmt = is_wrap_op ? "invalid wrapping negation type: '%s'" : "invalid negation type: '%s'";
@@ -21677,7 +21670,7 @@ static ErrorMsg *ir_eval_negation_scalar(IrAnalyze *ira, IrInst* source_instr, Z
float_negate(scalar_out_val, operand_val);
} else if (is_wrap_op) {
bigint_negate_wrap(&scalar_out_val->data.x_bigint, &operand_val->data.x_bigint,
- scalar_type->data.integral.bit_count);
+ scalar_type->data.integral.bit_count, scalar_type->data.integral.is_signed);
} else {
bigint_negate(&scalar_out_val->data.x_bigint, &operand_val->data.x_bigint);
}
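
Note on the ir_eval_negation_scalar change above: ok_type now admits unsigned integers for the wrapping case, and the new is_signed argument lets bigint_negate_wrap produce the matching two's-complement result. A hedged example of the language-level behavior this enables:

    const std = @import("std");

    test "wrapping negation of an unsigned integer" {
        const x: u8 = 1;
        // -%x wraps modulo 2^8; non-wrapping negation of a nonzero unsigned
        // value cannot be represented and is rejected.
        try std.testing.expectEqual(@as(u8, 255), -%x);
    }
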
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index c37b3ffefb..d57277cd51 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -946,10 +946,7 @@ static AstNode *ast_parse_statement(ParseContext *pc) {
Token *suspend = eat_token_if(pc, TokenIdKeywordSuspend);
if (suspend != nullptr) {
- AstNode *statement = nullptr;
- if (eat_token_if(pc, TokenIdSemicolon) == nullptr)
- statement = ast_expect(pc, ast_parse_block_expr_statement);
-
+ AstNode *statement = ast_expect(pc, ast_parse_block_expr_statement);
AstNode *res = ast_create_node(pc, NodeTypeSuspend, suspend);
res->data.suspend.block = statement;
return res;
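
This is the parser change called out in the merge message: suspend now always requires a block statement, so a bare `suspend;` becomes a parse error. Roughly:

    // Before this change both forms parsed; afterwards only the block form does:
    //
    //     suspend;     // now a parse error
    //     suspend {}   // required spelling, even when the body is empty
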
diff --git a/src/stage1/softfloat_ext.cpp b/src/stage1/softfloat_ext.cpp
index 8408a15116..d0b8d1a5b3 100644
--- a/src/stage1/softfloat_ext.cpp
+++ b/src/stage1/softfloat_ext.cpp
@@ -1,17 +1,21 @@
#include "softfloat_ext.hpp"
+#include "zigendian.h"
extern "C" {
#include "softfloat.h"
}
void f128M_abs(const float128_t *aPtr, float128_t *zPtr) {
- float128_t zero_float;
- ui32_to_f128M(0, &zero_float);
- if (f128M_lt(aPtr, &zero_float)) {
- f128M_sub(&zero_float, aPtr, zPtr);
- } else {
- *zPtr = *aPtr;
- }
+ // Clear the sign bit.
+#if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN
+ zPtr->v[1] = aPtr->v[1] & ~(UINT64_C(1) << 63);
+ zPtr->v[0] = aPtr->v[0];
+#elif ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
+ zPtr->v[0] = aPtr->v[0] & ~(UINT64_C(1) << 63);
+ zPtr->v[1] = aPtr->v[1];
+#else
+#error Unsupported endian
+#endif
}
void f128M_trunc(const float128_t *aPtr, float128_t *zPtr) {
@@ -22,4 +26,24 @@ void f128M_trunc(const float128_t *aPtr, float128_t *zPtr) {
} else {
f128M_roundToInt(aPtr, softfloat_round_min, false, zPtr);
}
+}
+
+float16_t f16_neg(const float16_t a) {
+ union { uint16_t ui; float16_t f; } uA;
+ // Toggle the sign bit.
+ uA.ui = a.v ^ (UINT16_C(1) << 15);
+ return uA.f;
+}
+
+void f128M_neg(const float128_t *aPtr, float128_t *zPtr) {
+ // Toggle the sign bit.
+#if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN
+ zPtr->v[1] = aPtr->v[1] ^ (UINT64_C(1) << 63);
+ zPtr->v[0] = aPtr->v[0];
+#elif ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN
+ zPtr->v[0] = aPtr->v[0] ^ (UINT64_C(1) << 63);
+ zPtr->v[1] = aPtr->v[1];
+#else
+#error Unsupported endian
+#endif
+}
\ No newline at end of file
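
Note on the new helpers above: negation is implemented by toggling the IEEE-754 sign bit directly, which is exact for every input — including +0.0 (where `0 - x` yields +0.0 rather than -0.0) and NaNs, whose payloads a subtraction may not preserve. The same trick on a 64-bit float, as a hedged Zig-side analogue:

    const std = @import("std");

    // Negate an f64 by flipping its sign bit, mirroring what f128M_neg does to
    // the high limb of a float128_t.
    fn negViaSignBit(x: f64) f64 {
        return @bitCast(f64, @bitCast(u64, x) ^ (@as(u64, 1) << 63));
    }

    test "sign-bit negation handles zero correctly" {
        try std.testing.expect(std.math.signbit(negViaSignBit(0.0)));
        try std.testing.expectEqual(@as(f64, -1.5), negViaSignBit(1.5));
    }
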
diff --git a/src/stage1/softfloat_ext.hpp b/src/stage1/softfloat_ext.hpp
index 0a1f958933..42922a5226 100644
--- a/src/stage1/softfloat_ext.hpp
+++ b/src/stage1/softfloat_ext.hpp
@@ -5,5 +5,8 @@
void f128M_abs(const float128_t *aPtr, float128_t *zPtr);
void f128M_trunc(const float128_t *aPtr, float128_t *zPtr);
+void f128M_neg(const float128_t *aPtr, float128_t *zPtr);
+
+float16_t f16_neg(const float16_t a);
 #endif
\ No newline at end of file
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 9a1215abd6..ac5c52ee0d 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -1353,10 +1353,14 @@ fn transCreatePointerArithmeticSignedOp(
const bitcast_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
- const arith_args = .{ .lhs = lhs_node, .rhs = bitcast_node };
- const arith_node = try if (is_add) Tag.add.create(c.arena, arith_args) else Tag.sub.create(c.arena, arith_args);
-
- return maybeSuppressResult(c, scope, result_used, arith_node);
+ return transCreateNodeInfixOp(
+ c,
+ scope,
+ if (is_add) .add else .sub,
+ lhs_node,
+ bitcast_node,
+ result_used,
+ );
}
fn transBinaryOperator(
@@ -2161,8 +2165,8 @@ fn transCCast(
return Tag.as.create(c.arena, .{ .lhs = dst_node, .rhs = bool_to_int });
}
if (cIsEnum(dst_type)) {
- // @intToEnum(dest_type, val)
- return Tag.int_to_enum.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
+ // import("std").meta.cast(dest_type, val)
+ return Tag.std_meta_cast.create(c.arena, .{ .lhs = dst_node, .rhs = expr });
}
if (cIsEnum(src_type) and !cIsEnum(dst_type)) {
// @enumToInt(val)
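
Note on the transCCast change above: casts to C enums now go through std.meta.cast instead of @intToEnum, presumably because C allows an enum object to hold any value of its underlying integer type while @intToEnum is safety-checked. A hedged sketch with an illustrative enum:

    const std = @import("std");

    const E = enum(c_int) { a = 1, b = 2 };

    test "what translate-c emits for (enum E)x after this change" {
        const x: c_int = 2;
        const e = std.meta.cast(E, x); // previously: @intToEnum(E, x)
        try std.testing.expectEqual(E.b, e);
    }
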
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 4be0fead97..61d28bb22d 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -1665,7 +1665,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.array_access => {
const payload = node.castTag(.array_access).?.data;
- const lhs = try renderNode(c, payload.lhs);
+ const lhs = try renderNodeGrouped(c, payload.lhs);
const l_bracket = try c.addToken(.l_bracket, "[");
const index_expr = try renderNode(c, payload.rhs);
_ = try c.addToken(.r_bracket, "]");
@@ -1728,7 +1728,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.field_access => {
const payload = node.castTag(.field_access).?.data;
- const lhs = try renderNode(c, payload.lhs);
+ const lhs = try renderNodeGrouped(c, payload.lhs);
return renderFieldAccess(c, lhs, payload.field_name);
},
.@"struct", .@"union" => return renderRecord(c, node),
@@ -2073,7 +2073,7 @@ fn renderNullSentinelArrayType(c: *Context, len: usize, elem_type: Node) !NodeIn
.main_token = l_bracket,
.data = .{
.lhs = len_expr,
- .rhs = try c.addExtra(std.zig.ast.Node.ArrayTypeSentinel {
+ .rhs = try c.addExtra(std.zig.ast.Node.ArrayTypeSentinel{
.sentinel = sentinel_expr,
.elem_type = elem_type_expr,
}),
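
Note on the renderNodeGrouped changes above: a compound lhs must be parenthesized before array or field access, otherwise the rendered expression re-associates. A hedged illustration:

    const std = @import("std");

    test "grouping the lhs of a field access" {
        const Point = struct { x: i32 };
        const a: ?Point = .{ .x = 1 };
        const b: Point = .{ .x = 2 };
        // Ungrouped, `a orelse b.x` parses as `a orelse (b.x)` and is a type
        // error; the grouped form is what renderNodeGrouped produces.
        try std.testing.expectEqual(@as(i32, 1), (a orelse b).x);
    }
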