Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig                    |   39
-rw-r--r--  src/Autodoc.zig                   |   17
-rw-r--r--  src/BuiltinFn.zig                 |    8
-rw-r--r--  src/Compilation.zig               |    4
-rw-r--r--  src/Manifest.zig                  |  499
-rw-r--r--  src/Module.zig                    |  111
-rw-r--r--  src/Package.zig                   |  309
-rw-r--r--  src/Sema.zig                      |  121
-rw-r--r--  src/TypedValue.zig                |    5
-rw-r--r--  src/Zir.zig                       |    6
-rw-r--r--  src/arch/aarch64/CodeGen.zig      |  318
-rw-r--r--  src/arch/aarch64/Emit.zig         |   16
-rw-r--r--  src/arch/arm/CodeGen.zig          |  146
-rw-r--r--  src/arch/riscv64/CodeGen.zig      |   56
-rw-r--r--  src/arch/sparc64/CodeGen.zig      |   45
-rw-r--r--  src/arch/wasm/CodeGen.zig         |   64
-rw-r--r--  src/arch/wasm/Emit.zig            |   29
-rw-r--r--  src/arch/x86_64/CodeGen.zig       |  254
-rw-r--r--  src/arch/x86_64/Emit.zig          |   16
-rw-r--r--  src/codegen.zig                   |  189
-rw-r--r--  src/codegen/c.zig                 |   70
-rw-r--r--  src/codegen/llvm.zig              |    5
-rw-r--r--  src/codegen/spirv.zig             |   30
-rw-r--r--  src/link.zig                      |   83
-rw-r--r--  src/link/C.zig                    |    4
-rw-r--r--  src/link/Coff.zig                 |  543
-rw-r--r--  src/link/Coff/Atom.zig            |   59
-rw-r--r--  src/link/Coff/Relocation.zig      |   18
-rw-r--r--  src/link/Dwarf.zig                |  579
-rw-r--r--  src/link/Elf.zig                  | 1298
-rw-r--r--  src/link/Elf/Atom.zig             |  100
-rw-r--r--  src/link/MachO.zig                |  765
-rw-r--r--  src/link/MachO/Atom.zig           |   92
-rw-r--r--  src/link/MachO/DebugSymbols.zig   |   12
-rw-r--r--  src/link/MachO/Relocation.zig     |   16
-rw-r--r--  src/link/MachO/load_commands.zig  |    2
-rw-r--r--  src/link/MachO/zld.zig            |   11
-rw-r--r--  src/link/Plan9.zig                |  249
-rw-r--r--  src/link/SpirV.zig                |   18
-rw-r--r--  src/link/Wasm.zig                 |  573
-rw-r--r--  src/link/Wasm/Atom.zig            |   46
-rw-r--r--  src/link/Wasm/Object.zig          |   15
-rw-r--r--  src/main.zig                      |   35
-rw-r--r--  src/mingw.zig                     |    1
-rw-r--r--  src/print_zir.zig                 |    1
-rw-r--r--  src/translate_c.zig               |    5
-rw-r--r--  src/type.zig                      |  631
47 files changed, 3841 insertions(+), 3672 deletions(-)
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 15b3611a1e..10673a2b37 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2530,6 +2530,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.bit_size_of,
.typeof_log2_int_type,
.ptr_to_int,
+ .qual_cast,
.align_of,
.bool_to_int,
.embed_file,
@@ -4278,7 +4279,34 @@ fn testDecl(
var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null;
while (true) switch (s.tag) {
- .local_val, .local_ptr => unreachable, // a test cannot be in a local scope
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == name_str_index) {
+ local_val.used = test_name_token;
+ return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
+ @tagName(local_val.id_cat),
+ }, &[_]u32{
+ try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{
+ @tagName(local_val.id_cat),
+ }),
+ });
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == name_str_index) {
+ local_ptr.used = test_name_token;
+ return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
+ @tagName(local_ptr.id_cat),
+ }, &[_]u32{
+ try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{
+ @tagName(local_ptr.id_cat),
+ }),
+ });
+ }
+ s = local_ptr.parent;
+ },
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
@@ -8010,6 +8038,7 @@ fn builtinCall(
.float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast),
.int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast),
.ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast),
+ .qual_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .qual_cast),
.truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate),
// zig fmt: on
@@ -8692,6 +8721,7 @@ fn callExpr(
defer arg_block.unstack();
// `call_inst` is reused to provide the param type.
+ arg_block.rl_ty_inst = call_inst;
const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node);
_ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
@@ -10840,7 +10870,12 @@ const GenZir = struct {
// we emit ZIR for the block break instructions to have the result values,
// and then rvalue() on that to pass the value to the result location.
switch (parent_ri.rl) {
- .ty, .coerced_ty => |ty_inst| {
+ .coerced_ty => |ty_inst| {
+ // Type coercion needs to happen before breaks.
+ gz.rl_ty_inst = ty_inst;
+ gz.break_result_info = .{ .rl = .{ .ty = ty_inst } };
+ },
+ .ty => |ty_inst| {
gz.rl_ty_inst = ty_inst;
gz.break_result_info = parent_ri;
},
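Aside: the testDecl hunk above replaces an unconditional 'unreachable' with a real diagnostic when a decltest's name resolves to a local. A minimal sketch of code that would now report the new error instead of crashing the compiler, assuming decltest syntax ('test identifier') used inside a container nested in a function body:

    fn f() void {
        const foo = 1;
        const S = struct {
            test foo {} // error: cannot test a local constant
                        // note: local constant declared here
        };
        _ = S;
        _ = foo;
    }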
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 8afc9c859b..0c2c39bbcc 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1400,6 +1400,7 @@ fn walkInstruction(
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_decl,
@@ -2200,17 +2201,10 @@ fn walkInstruction(
false,
);
- _ = operand;
-
- // WIP
-
- printWithContext(
- file,
- inst_index,
- "TODO: implement `{s}` for walkInstruction\n\n",
- .{@tagName(tags[inst_index])},
- );
- return self.cteTodo(@tagName(tags[inst_index]));
+ return DocData.WalkResult{
+ .typeRef = operand.expr,
+ .expr = .{ .@"struct" = &.{} },
+ };
},
.struct_init_anon => {
const pl_node = data[inst_index].pl_node;
@@ -2537,6 +2531,7 @@ fn walkInstruction(
const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]);
const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
value.expr = var_init.expr;
+ value.typeRef = var_init.typeRef;
}
return value;
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index b71d96c3dd..80eb739185 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -75,6 +75,7 @@ pub const Tag = enum {
prefetch,
ptr_cast,
ptr_to_int,
+ qual_cast,
rem,
return_address,
select,
@@ -675,6 +676,13 @@ pub const list = list: {
},
},
.{
+ "@qualCast",
+ .{
+ .tag = .qual_cast,
+ .param_count = 2,
+ },
+ },
+ .{
"@rem",
.{
.tag = .rem,
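Aside: based on the Sema implementation later in this diff, the new '@qualCast' takes a destination pointer type and an operand, and may change only the 'const' and 'volatile' qualifiers. An illustrative sketch, not part of the diff:

    fn unconst(p: *const u8) *u8 {
        return @qualCast(*u8, p); // ok: only the const qualifier differs
    }

    // Changing anything other than const/volatile is rejected:
    //   @qualCast([*]u8, p)
    //   error: '@qualCast' can only modify 'const' and 'volatile' qualifiers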
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 09c6e1c686..e09b8f18ab 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -385,7 +385,7 @@ pub const AllErrors = struct {
count: u32 = 1,
/// Does not include the trailing newline.
source_line: ?[]const u8,
- notes: []Message = &.{},
+ notes: []const Message = &.{},
reference_trace: []Message = &.{},
/// Splits the error message up into lines to properly indent them
@@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
- comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
+ comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
diff --git a/src/Manifest.zig b/src/Manifest.zig
new file mode 100644
index 0000000000..c3f77aec98
--- /dev/null
+++ b/src/Manifest.zig
@@ -0,0 +1,499 @@
+pub const basename = "build.zig.zon";
+pub const Hash = std.crypto.hash.sha2.Sha256;
+
+pub const Dependency = struct {
+ url: []const u8,
+ url_tok: Ast.TokenIndex,
+ hash: ?[]const u8,
+ hash_tok: Ast.TokenIndex,
+};
+
+pub const ErrorMessage = struct {
+ msg: []const u8,
+ tok: Ast.TokenIndex,
+ off: u32,
+};
+
+pub const MultihashFunction = enum(u16) {
+ identity = 0x00,
+ sha1 = 0x11,
+ @"sha2-256" = 0x12,
+ @"sha2-512" = 0x13,
+ @"sha3-512" = 0x14,
+ @"sha3-384" = 0x15,
+ @"sha3-256" = 0x16,
+ @"sha3-224" = 0x17,
+ @"sha2-384" = 0x20,
+ @"sha2-256-trunc254-padded" = 0x1012,
+ @"sha2-224" = 0x1013,
+ @"sha2-512-224" = 0x1014,
+ @"sha2-512-256" = 0x1015,
+ @"blake2b-256" = 0xb220,
+ _,
+};
+
+pub const multihash_function: MultihashFunction = switch (Hash) {
+ std.crypto.hash.sha2.Sha256 => .@"sha2-256",
+ else => @compileError("unreachable"),
+};
+comptime {
+ // We avoid unnecessary uleb128 code in hexDigest by asserting here the
+ // values are small enough to be contained in the one-byte encoding.
+ assert(@enumToInt(multihash_function) < 127);
+ assert(Hash.digest_length < 127);
+}
+pub const multihash_len = 1 + 1 + Hash.digest_length;
+
+name: []const u8,
+version: std.SemanticVersion,
+dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+errors: []ErrorMessage,
+arena_state: std.heap.ArenaAllocator.State,
+
+pub const Error = Allocator.Error;
+
+pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
+ const node_tags = ast.nodes.items(.tag);
+ const node_datas = ast.nodes.items(.data);
+ assert(node_tags[0] == .root);
+ const main_node_index = node_datas[0].lhs;
+
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ errdefer arena_instance.deinit();
+
+ var p: Parse = .{
+ .gpa = gpa,
+ .ast = ast,
+ .arena = arena_instance.allocator(),
+ .errors = .{},
+
+ .name = undefined,
+ .version = undefined,
+ .dependencies = .{},
+ .buf = .{},
+ };
+ defer p.buf.deinit(gpa);
+ defer p.errors.deinit(gpa);
+ defer p.dependencies.deinit(gpa);
+
+ p.parseRoot(main_node_index) catch |err| switch (err) {
+ error.ParseFailure => assert(p.errors.items.len > 0),
+ else => |e| return e,
+ };
+
+ return .{
+ .name = p.name,
+ .version = p.version,
+ .dependencies = try p.dependencies.clone(p.arena),
+ .errors = try p.arena.dupe(ErrorMessage, p.errors.items),
+ .arena_state = arena_instance.state,
+ };
+}
+
+pub fn deinit(man: *Manifest, gpa: Allocator) void {
+ man.arena_state.promote(gpa).deinit();
+ man.* = undefined;
+}
+
+const hex_charset = "0123456789abcdef";
+
+pub fn hex64(x: u64) [16]u8 {
+ var result: [16]u8 = undefined;
+ var i: usize = 0;
+ while (i < 8) : (i += 1) {
+ const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+ result[i * 2 + 0] = hex_charset[byte >> 4];
+ result[i * 2 + 1] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
+test hex64 {
+ const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
+ try std.testing.expectEqualStrings("[00efcdab78563412]", s);
+}
+
+pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
+ var result: [multihash_len * 2]u8 = undefined;
+
+ result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
+ result[1] = hex_charset[@enumToInt(multihash_function) & 15];
+
+ result[2] = hex_charset[Hash.digest_length >> 4];
+ result[3] = hex_charset[Hash.digest_length & 15];
+
+ for (digest) |byte, i| {
+ result[4 + i * 2] = hex_charset[byte >> 4];
+ result[5 + i * 2] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
+const Parse = struct {
+ gpa: Allocator,
+ ast: std.zig.Ast,
+ arena: Allocator,
+ buf: std.ArrayListUnmanaged(u8),
+ errors: std.ArrayListUnmanaged(ErrorMessage),
+
+ name: []const u8,
+ version: std.SemanticVersion,
+ dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+ const InnerError = error{ ParseFailure, OutOfMemory };
+
+ fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const main_token = main_tokens[node];
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ return fail(p, main_token, "expected top level expression to be a struct", .{});
+ };
+
+ var have_name = false;
+ var have_version = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "dependencies")) {
+ try parseDependencies(p, field_init);
+ } else if (mem.eql(u8, field_name, "name")) {
+ p.name = try parseString(p, field_init);
+ have_name = true;
+ } else if (mem.eql(u8, field_name, "version")) {
+ const version_text = try parseString(p, field_init);
+ p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
+ try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
+ break :v undefined;
+ };
+ have_version = true;
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_name) {
+ try appendError(p, main_token, "missing top-level 'name' field", .{});
+ }
+
+ if (!have_version) {
+ try appendError(p, main_token, "missing top-level 'version' field", .{});
+ }
+ }
+
+ fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependencies expression to be a struct", .{});
+ };
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const dep_name = try identifierTokenString(p, name_token);
+ const dep = try parseDependency(p, field_init);
+ try p.dependencies.put(p.gpa, dep_name, dep);
+ }
+ }
+
+ fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependency expression to be a struct", .{});
+ };
+
+ var dep: Dependency = .{
+ .url = undefined,
+ .url_tok = undefined,
+ .hash = null,
+ .hash_tok = undefined,
+ };
+ var have_url = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "url")) {
+ dep.url = parseString(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.url_tok = main_tokens[field_init];
+ have_url = true;
+ } else if (mem.eql(u8, field_name, "hash")) {
+ dep.hash = parseHash(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.hash_tok = main_tokens[field_init];
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_url) {
+ try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
+ }
+
+ return dep;
+ }
+
+ fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const node_tags = ast.nodes.items(.tag);
+ const main_tokens = ast.nodes.items(.main_token);
+ if (node_tags[node] != .string_literal) {
+ return fail(p, main_tokens[node], "expected string literal", .{});
+ }
+ const str_lit_token = main_tokens[node];
+ const token_bytes = ast.tokenSlice(str_lit_token);
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
+
+ fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const tok = main_tokens[node];
+ const h = try parseString(p, node);
+
+ if (h.len >= 2) {
+ const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
+ return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
+ @errorName(err),
+ });
+ };
+ if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
+ return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
+ }
+ }
+
+ const hex_multihash_len = 2 * Manifest.multihash_len;
+ if (h.len != hex_multihash_len) {
+ return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
+ hex_multihash_len, h.len,
+ });
+ }
+
+ return h;
+ }
+
+ /// TODO: try to DRY this with AstGen.identifierTokenString
+ fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
+ const ast = p.ast;
+ const token_tags = ast.tokens.items(.tag);
+ assert(token_tags[token] == .identifier);
+ const ident_name = ast.tokenSlice(token);
+ if (!mem.startsWith(u8, ident_name, "@")) {
+ return ident_name;
+ }
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, token, &p.buf, ident_name, 1);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
+
+ /// TODO: try to DRY this with AstGen.parseStrLit
+ fn parseStrLit(
+ p: *Parse,
+ token: Ast.TokenIndex,
+ buf: *std.ArrayListUnmanaged(u8),
+ bytes: []const u8,
+ offset: u32,
+ ) InnerError!void {
+ const raw_string = bytes[offset..];
+ var buf_managed = buf.toManaged(p.gpa);
+ const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
+ buf.* = buf_managed.moveToUnmanaged();
+ switch (try result) {
+ .success => {},
+ .failure => |err| try p.appendStrLitError(err, token, bytes, offset),
+ }
+ }
+
+ /// TODO: try to DRY this with AstGen.failWithStrLitError
+ fn appendStrLitError(
+ p: *Parse,
+ err: std.zig.string_literal.Error,
+ token: Ast.TokenIndex,
+ bytes: []const u8,
+ offset: u32,
+ ) Allocator.Error!void {
+ const raw_string = bytes[offset..];
+ switch (err) {
+ .invalid_escape_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid escape character: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_hex_digit => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit, found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .empty_unicode_escape_sequence => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "empty unicode escape sequence",
+ .{},
+ );
+ },
+ .expected_hex_digit_or_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit or '}}', found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_unicode_codepoint => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "unicode escape does not correspond to a valid codepoint",
+ .{},
+ );
+ },
+ .expected_lbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '{{', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '}}', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_single_quote => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected single quote ('), found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid byte in string or character literal: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ }
+ }
+
+ fn fail(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) InnerError {
+ try appendError(p, tok, fmt, args);
+ return error.ParseFailure;
+ }
+
+ fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
+ return appendErrorOff(p, tok, 0, fmt, args);
+ }
+
+ fn appendErrorOff(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ byte_offset: u32,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) Allocator.Error!void {
+ try p.errors.append(p.gpa, .{
+ .msg = try std.fmt.allocPrint(p.arena, fmt, args),
+ .tok = tok,
+ .off = byte_offset,
+ });
+ }
+};
+
+const Manifest = @This();
+const std = @import("std");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const Ast = std.zig.Ast;
+const testing = std.testing;
+
+test "basic" {
+ const gpa = testing.allocator;
+
+ const example =
+ \\.{
+ \\ .name = "foo",
+ \\ .version = "3.2.1",
+ \\ .dependencies = .{
+ \\ .bar = .{
+ \\ .url = "https://example.com/baz.tar.gz",
+ \\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ \\ },
+ \\ },
+ \\}
+ ;
+
+ var ast = try std.zig.Ast.parse(gpa, example, .zon);
+ defer ast.deinit(gpa);
+
+ try testing.expect(ast.errors.len == 0);
+
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ try testing.expectEqualStrings("foo", manifest.name);
+
+ try testing.expectEqual(@as(std.SemanticVersion, .{
+ .major = 3,
+ .minor = 2,
+ .patch = 1,
+ }), manifest.version);
+
+ try testing.expect(manifest.dependencies.count() == 1);
+ try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
+ try testing.expectEqualStrings(
+ "https://example.com/baz.tar.gz",
+ manifest.dependencies.values()[0].url,
+ );
+ try testing.expectEqualStrings(
+ "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ manifest.dependencies.values()[0].hash orelse return error.TestFailed,
+ );
+}
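Aside: the two hex digits that hexDigest prepends are the multihash header, which is why every hash in this scheme (including the one in the test above) begins with "1220": 0x12 is the sha2-256 id in MultihashFunction and 0x20 is the 32-byte digest length. A test that should hold given the definitions above, shown here for illustration only:

    test "multihash prefix" {
        const digest = [_]u8{0} ** Hash.digest_length;
        const hex = hexDigest(digest);
        try std.testing.expectEqualStrings("1220", hex[0..4]);
    }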
diff --git a/src/Module.zig b/src/Module.zig
index b17c140231..3bb15e78c3 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct {
options: std.builtin.ExportOptions,
src: LazySrcLoc,
- /// Represents the position of the export, if any, in the output file.
- link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
@@ -533,16 +531,8 @@ pub const Decl = struct {
/// What kind of a declaration is this.
kind: Kind,
- /// Represents the position of the code in the output file.
- /// This is populated regardless of semantic analysis and code generation.
- link: link.File.LinkBlock,
-
- /// Represents the function in the linked output file, if the `Decl` is a function.
- /// This is stored here and not in `Fn` because `Decl` survives across updates but
- /// `Fn` does not.
- /// TODO Look into making `Fn` a longer lived structure and moving this field there
- /// to save on memory usage.
- fn_link: link.File.LinkFn,
+ /// TODO remove this once Wasm backend catches up
+ fn_link: ?link.File.Wasm.FnData = null,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
@@ -2067,7 +2057,7 @@ pub const File = struct {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
- file.tree = try std.zig.parse(gpa, source.bytes);
+ file.tree = try Ast.parse(gpa, source.bytes, .zig);
file.tree_loaded = true;
return &file.tree;
}
@@ -3672,7 +3662,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, source);
+ file.tree = try Ast.parse(gpa, source, .zig);
defer if (!file.tree_loaded) file.tree.deinit(gpa);
if (file.tree.errors.len != 0) {
@@ -3987,7 +3977,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
else => |e| return e,
}
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
assert(file.tree.errors.len == 0); // builtin.zig must parse
@@ -4098,7 +4088,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| {
@@ -4585,7 +4575,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel.
- try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
if (type_changed and mod.emit_h != null) {
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
@@ -4697,7 +4686,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl.ty);
- try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
if (type_changed and mod.emit_h != null) {
@@ -5185,20 +5173,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) {
- .coff => {
- // TODO Implement for COFF
- },
- .elf => if (decl.fn_link.elf.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .macho => if (decl.fn_link.macho.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .plan9 => {
+ .coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@@ -5267,33 +5242,15 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index));
}
_ = mod.compile_log_decls.swapRemove(decl_index);
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
- // TODO instead of a union, put this memory trailing Decl objects,
- // and allow it to be variably sized.
- decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- };
decl.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
};
}
if (decl.getInnerNamespace()) |namespace| {
@@ -5315,23 +5272,6 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name });
- // TODO: remove `allocateDeclIndexes` and make the API that the linker backends
- // are required to notice the first time `updateDecl` happens and keep track
- // of it themselves. However they can rely on getting a `freeDecl` call if any
- // `updateDecl` or `updateFunc` calls happen. This will allow us to avoid any call
- // into the linker backend here, since the linker backend will never have been told
- // about the Decl in the first place.
- // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we
- // must call `freeDecl` in the linker backend now.
- switch (mod.comp.bin_file.tag) {
- .c => {}, // this linker backend has already migrated to the new API
- else => if (decl.has_tv) {
- if (decl.ty.isFnOrHasRuntimeBits()) {
- mod.comp.bin_file.freeDecl(decl_index);
- }
- },
- }
-
assert(!mod.declIsRoot(decl_index));
assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
@@ -5377,7 +5317,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
+fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| {
@@ -5400,16 +5340,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteExport(exp.link.elf);
+ elf.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
- macho.deleteExport(exp.link.macho);
+ try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
- wasm.deleteExport(exp.link.wasm);
+ wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
- coff.deleteExport(exp.link.coff);
+ coff.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
@@ -5712,25 +5652,9 @@ pub fn allocateNewDecl(
.deletion_flag = false,
.zir_decl_index = 0,
.src_scope = src_scope,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
+ .wasm => link.File.Wasm.FnData.empty,
+ else => null,
},
.generation = 0,
.is_pub = false,
@@ -5816,7 +5740,6 @@ pub fn initNewAnonDecl(
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.isFnOrHasRuntimeBits()) {
- try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
}
}
diff --git a/src/Package.zig b/src/Package.zig
index ebe84b8444..401eef2121 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -1,12 +1,13 @@
const Package = @This();
+const builtin = @import("builtin");
const std = @import("std");
const fs = std.fs;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
-const Hash = std.crypto.hash.sha2.Sha256;
const log = std.log.scoped(.package);
+const main = @import("main.zig");
const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
@@ -14,6 +15,7 @@ const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig");
const Cache = @import("Cache.zig");
const build_options = @import("build_options");
+const Manifest = @import("Manifest.zig");
pub const Table = std.StringHashMapUnmanaged(*Package);
@@ -140,10 +142,10 @@ pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void {
}
pub const build_zig_basename = "build.zig";
-pub const ini_basename = build_zig_basename ++ ".ini";
pub fn fetchAndAddDependencies(
pkg: *Package,
+ arena: Allocator,
thread_pool: *ThreadPool,
http_client: *std.http.Client,
directory: Compilation.Directory,
@@ -152,89 +154,77 @@ pub fn fetchAndAddDependencies(
dependencies_source: *std.ArrayList(u8),
build_roots_source: *std.ArrayList(u8),
name_prefix: []const u8,
+ color: main.Color,
) !void {
const max_bytes = 10 * 1024 * 1024;
const gpa = thread_pool.allocator;
- const build_zig_ini = directory.handle.readFileAlloc(gpa, ini_basename, max_bytes) catch |err| switch (err) {
+ const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
+ arena,
+ Manifest.basename,
+ max_bytes,
+ null,
+ 1,
+ 0,
+ ) catch |err| switch (err) {
error.FileNotFound => {
// Handle the same as no dependencies.
return;
},
else => |e| return e,
};
- defer gpa.free(build_zig_ini);
- const ini: std.Ini = .{ .bytes = build_zig_ini };
- var any_error = false;
- var it = ini.iterateSection("\n[dependency]\n");
- while (it.next()) |dep| {
- var line_it = mem.split(u8, dep, "\n");
- var opt_name: ?[]const u8 = null;
- var opt_url: ?[]const u8 = null;
- var expected_hash: ?[]const u8 = null;
- while (line_it.next()) |kv| {
- const eq_pos = mem.indexOfScalar(u8, kv, '=') orelse continue;
- const key = kv[0..eq_pos];
- const value = kv[eq_pos + 1 ..];
- if (mem.eql(u8, key, "name")) {
- opt_name = value;
- } else if (mem.eql(u8, key, "url")) {
- opt_url = value;
- } else if (mem.eql(u8, key, "hash")) {
- expected_hash = value;
- } else {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(key.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.warn("{s}/{s}:{d}:{d} unrecognized key: '{s}'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- key,
- });
- }
- }
+ var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
+ defer ast.deinit(gpa);
- const name = opt_name orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
- };
+ if (ast.errors.len > 0) {
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
+ return error.PackageFetchFailed;
+ }
- const url = opt_url orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ if (manifest.errors.len > 0) {
+ const ttyconf: std.debug.TTY.Config = switch (color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
};
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ for (manifest.errors) |msg| {
+ Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
+ }
+ return error.PackageFetchFailed;
+ }
- const sub_prefix = try std.fmt.allocPrint(gpa, "{s}{s}.", .{ name_prefix, name });
- defer gpa.free(sub_prefix);
+ const report: Report = .{
+ .ast = &ast,
+ .directory = directory,
+ .color = color,
+ .arena = arena,
+ };
+
+ var any_error = false;
+ const deps_list = manifest.dependencies.values();
+ for (manifest.dependencies.keys()) |name, i| {
+ const dep = deps_list[i];
+
+ const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
const fqn = sub_prefix[0 .. sub_prefix.len - 1];
const sub_pkg = try fetchAndUnpack(
thread_pool,
http_client,
global_cache_directory,
- url,
- expected_hash,
- ini,
- directory,
+ dep,
+ report,
build_roots_source,
fqn,
);
try pkg.fetchAndAddDependencies(
+ arena,
thread_pool,
http_client,
sub_pkg.root_src_directory,
@@ -243,6 +233,7 @@ pub fn fetchAndAddDependencies(
dependencies_source,
build_roots_source,
sub_prefix,
+ color,
);
try addAndAdopt(pkg, gpa, sub_pkg);
@@ -252,7 +243,7 @@ pub fn fetchAndAddDependencies(
});
}
- if (any_error) return error.InvalidBuildZigIniFile;
+ if (any_error) return error.InvalidBuildManifestFile;
}
pub fn createFilePkg(
@@ -263,7 +254,7 @@ pub fn createFilePkg(
contents: []const u8,
) !*Package {
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
{
var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer tmp_dir.close();
@@ -281,14 +272,73 @@ pub fn createFilePkg(
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename);
}
+const Report = struct {
+ ast: *const std.zig.Ast,
+ directory: Compilation.Directory,
+ color: main.Color,
+ arena: Allocator,
+
+ fn fail(
+ report: Report,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
+ }
+
+ fn failWithNotes(
+ report: Report,
+ notes: []const Compilation.AllErrors.Message,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ const ttyconf: std.debug.TTY.Config = switch (report.color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+ const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
+ renderErrorMessage(report.ast.*, file_path, ttyconf, .{
+ .tok = tok,
+ .off = 0,
+ .msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
+ }, notes);
+ return error.PackageFetchFailed;
+ }
+
+ fn renderErrorMessage(
+ ast: std.zig.Ast,
+ file_path: []const u8,
+ ttyconf: std.debug.TTY.Config,
+ msg: Manifest.ErrorMessage,
+ notes: []const Compilation.AllErrors.Message,
+ ) void {
+ const token_starts = ast.tokens.items(.start);
+ const start_loc = ast.tokenLocation(0, msg.tok);
+ Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
+ .msg = msg.msg,
+ .src_path = file_path,
+ .line = @intCast(u32, start_loc.line),
+ .column = @intCast(u32, start_loc.column),
+ .span = .{
+ .start = token_starts[msg.tok],
+ .end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+ .main = token_starts[msg.tok] + msg.off,
+ },
+ .source_line = ast.source[start_loc.line_start..start_loc.line_end],
+ .notes = notes,
+ } }, ttyconf);
+ }
+};
+
fn fetchAndUnpack(
thread_pool: *ThreadPool,
http_client: *std.http.Client,
global_cache_directory: Compilation.Directory,
- url: []const u8,
- expected_hash: ?[]const u8,
- ini: std.Ini,
- comp_directory: Compilation.Directory,
+ dep: Manifest.Dependency,
+ report: Report,
build_roots_source: *std.ArrayList(u8),
fqn: []const u8,
) !*Package {
@@ -297,17 +347,9 @@ fn fetchAndUnpack(
// Check if the expected_hash is already present in the global package
// cache, and thereby avoid both fetching and unpacking.
- if (expected_hash) |h| cached: {
- if (h.len != 2 * Hash.digest_length) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "wrong hash size. expected: {d}, found: {d}",
- .{ Hash.digest_length, h.len },
- );
- }
- const hex_digest = h[0 .. 2 * Hash.digest_length];
+ if (dep.hash) |h| cached: {
+ const hex_multihash_len = 2 * Manifest.multihash_len;
+ const hex_digest = h[0..hex_multihash_len];
const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :cached,
@@ -344,10 +386,10 @@ fn fetchAndUnpack(
return ptr;
}
- const uri = try std.Uri.parse(url);
+ const uri = try std.Uri.parse(dep.url);
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ s ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
const actual_hash = a: {
var tmp_directory: Compilation.Directory = d: {
@@ -376,13 +418,9 @@ fn fetchAndUnpack(
// by default, so the same logic applies for buffering the reader as for gzip.
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
} else {
- return reportError(
- ini,
- comp_directory,
- uri.path.ptr,
- "unknown file extension for path '{s}'",
- .{uri.path},
- );
+ return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
+ uri.path,
+ });
}
// TODO: delete files not included in the package prior to computing the package hash.
@@ -393,28 +431,21 @@ fn fetchAndUnpack(
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
};
- const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash);
+ const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
- if (expected_hash) |h| {
- const actual_hex = hexDigest(actual_hash);
+ const actual_hex = Manifest.hexDigest(actual_hash);
+ if (dep.hash) |h| {
if (!mem.eql(u8, h, &actual_hex)) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "hash mismatch: expected: {s}, found: {s}",
- .{ h, actual_hex },
- );
+ return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
+ h, actual_hex,
+ });
}
} else {
- return reportError(
- ini,
- comp_directory,
- url.ptr,
- "url field is missing corresponding hash field: hash={s}",
- .{std.fmt.fmtSliceHexLower(&actual_hash)},
- );
+ const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
+ .msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
+ } }};
+ return report.failWithNotes(&notes, dep.url_tok, "url field is missing corresponding hash field", .{});
}
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
@@ -440,35 +471,21 @@ fn unpackTarball(
try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
.strip_components = 1,
+ // TODO: we would like to set this to executable_bit_only, but two
+ // things need to happen before that:
+ // 1. the tar implementation needs to support it
+ // 2. the hashing algorithm here needs to support detecting the is_executable
+ // bit on Windows from the ACLs (see the isExecutable function).
+ .mode_mode = .ignore,
});
}
-fn reportError(
- ini: std.Ini,
- comp_directory: Compilation.Directory,
- src_ptr: [*]const u8,
- comptime fmt_string: []const u8,
- fmt_args: anytype,
-) error{PackageFetchFailed} {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(src_ptr) - @ptrToInt(ini.bytes.ptr));
- if (comp_directory.path) |p| {
- std.debug.print("{s}{c}{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- p, fs.path.sep, ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- } else {
- std.debug.print("{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- }
- return error.PackageFetchFailed;
-}
-
const HashedFile = struct {
path: []const u8,
- hash: [Hash.digest_length]u8,
+ hash: [Manifest.Hash.digest_length]u8,
failure: Error!void,
- const Error = fs.File.OpenError || fs.File.ReadError;
+ const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context;
@@ -479,7 +496,7 @@ const HashedFile = struct {
fn computePackageHash(
thread_pool: *ThreadPool,
pkg_dir: fs.IterableDir,
-) ![Hash.digest_length]u8 {
+) ![Manifest.Hash.digest_length]u8 {
const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all
@@ -522,7 +539,7 @@ fn computePackageHash(
std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
var any_failures = false;
for (all_files.items) |hashed_file| {
hashed_file.failure catch |err| {
@@ -543,7 +560,9 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{});
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
+ hasher.update(hashed_file.path);
+ hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) {
const bytes_read = try file.read(&buf);
if (bytes_read == 0) break;
@@ -552,31 +571,17 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hasher.final(&hashed_file.hash);
}
-const hex_charset = "0123456789abcdef";
-
-fn hex64(x: u64) [16]u8 {
- var result: [16]u8 = undefined;
- var i: usize = 0;
- while (i < 8) : (i += 1) {
- const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
- }
- return result;
-}
-
-test hex64 {
- const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
- try std.testing.expectEqualStrings("[00efcdab78563412]", s);
-}
-
-fn hexDigest(digest: [Hash.digest_length]u8) [Hash.digest_length * 2]u8 {
- var result: [Hash.digest_length * 2]u8 = undefined;
- for (digest) |byte, i| {
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
+fn isExecutable(file: fs.File) !bool {
+ if (builtin.os.tag == .windows) {
+ // TODO check the ACL on Windows.
+ // Until this is implemented, this could be a false negative on
+ // Windows, which is why we do not yet set executable_bit_only above
+ // when unpacking the tarball.
+ return false;
+ } else {
+ const stat = try file.stat();
+ return (stat.mode & std.os.S.IXUSR) != 0;
}
- return result;
}
fn renameTmpIntoCache(
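Aside: with the hashFileFallible change above, each file's contribution to the package hash now covers its path and executable bit as well as its contents. Per file, the hash input is effectively the following (a sketch with a hypothetical path, not part of the diff):

    var hasher = Manifest.Hash.init(.{});
    hasher.update("src/main.zig");                  // hashed_file.path
    hasher.update(&.{ 0, @boolToInt(executable) }); // separator byte, then executable bit
    hasher.update(file_contents);                   // streamed in 8000-byte chunks
    hasher.final(&out);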
diff --git a/src/Sema.zig b/src/Sema.zig
index 2e57de2406..b7b3a55063 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1015,6 +1015,7 @@ fn analyzeBodyInner(
.float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst),
+ .qual_cast => try sema.zirQualCast(block, inst),
.truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
@@ -3294,7 +3295,7 @@ fn ensureResultUsed(
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is ignored", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -3325,7 +3326,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -5564,16 +5565,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility,
},
.src = src,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = .{} },
- .elf => .{ .elf = .{} },
- .macho => .{ .macho = .{} },
- .plan9 => .{ .plan9 = null },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = .{} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,
@@ -6446,7 +6437,12 @@ fn analyzeCall(
.extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
- else => unreachable,
+ else => {
+ assert(callee_ty.isPtrAtRuntime());
+ return sema.fail(block, call_src, "{s} call of function pointer", .{
+ @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
+ });
+ },
};
if (func_ty_info.is_var_args) {
return sema.fail(block, call_src, "{s} call of variadic function", .{
@@ -6879,6 +6875,8 @@ fn analyzeInlineCallArg(
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
+ } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
+ _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
.func_inst = func_inst,
@@ -6952,6 +6950,9 @@ fn analyzeInlineCallArg(
.val = arg_val,
};
} else {
+ if (zir_tags[inst] == .param_anytype_comptime) {
+ _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
+ }
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
}
@@ -7510,7 +7511,6 @@ fn resolveGenericInstantiationType(
// Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
// will be populated, ensuring it will have `analyzeBody` called with the ZIR
// parameters mapped appropriately.
- try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
return new_func;
}
@@ -8473,7 +8473,7 @@ fn handleExternLibName(
return sema.fail(
block,
src_loc,
- "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
+ "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
.{ lib_name, lib_name },
);
}
@@ -9010,7 +9010,18 @@ fn zirParam(
if (is_comptime and sema.preallocated_new_func != null) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
- const coerced_arg = try sema.coerce(block, param_ty, arg, src);
+ const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ // We are instantiating a generic function and a comptime arg
+ // cannot be coerced to the param type, but since we don't
+ // have the callee source location return `GenericPoison`
+ // so that the instantiation is failed and the coercion
+ // is handled by comptime call logic instead.
+ assert(sema.is_generic_instantiation);
+ return error.GenericPoison;
+ },
+ else => return err,
+ };
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
return;
}
@@ -19525,13 +19536,34 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_info = operand_ty.ptrInfo().data;
const dest_info = dest_ty.ptrInfo().data;
if (!operand_info.mutable and dest_info.mutable) {
- return sema.fail(block, src, "cast discards const qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"volatile" and !dest_info.@"volatile") {
- return sema.fail(block, src, "cast discards volatile qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"addrspace" != dest_info.@"addrspace") {
- return sema.fail(block, src, "cast changes pointer address space", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
const dest_is_slice = dest_ty.isSlice();
@@ -19586,6 +19618,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
dest_ty.fmt(sema.mod), dest_align,
});
+
+ try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -19621,6 +19655,49 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return block.addBitCast(aligned_dest_ty, ptr);
}
+fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
+ const operand = try sema.resolveInst(extra.rhs);
+ const operand_ty = sema.typeOf(operand);
+
+ try sema.checkPtrType(block, dest_ty_src, dest_ty);
+ try sema.checkPtrOperand(block, operand_src, operand_ty);
+
+ var operand_payload = operand_ty.ptrInfo();
+ var dest_info = dest_ty.ptrInfo();
+
+ operand_payload.data.mutable = dest_info.data.mutable;
+ operand_payload.data.@"volatile" = dest_info.data.@"volatile";
+
+ const altered_operand_ty = Type.initPayload(&operand_payload.base);
+ if (!altered_operand_ty.eql(dest_ty, sema.mod)) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "'@qualCast' can only modify 'const' and 'volatile' qualifiers", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ dest_info.data.mutable = !operand_ty.isConstPtr();
+ dest_info.data.@"volatile" = operand_ty.isVolatilePtr();
+ const altered_dest_ty = Type.initPayload(&dest_info.base);
+ try sema.errNote(block, src, msg, "expected type '{}'", .{altered_dest_ty.fmt(sema.mod)});
+ try sema.errNote(block, src, msg, "got type '{}'", .{operand_ty.fmt(sema.mod)});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
+ return sema.addConstant(dest_ty, operand_val);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addBitCast(dest_ty, operand);
+}
+
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -25137,7 +25214,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
- try sema.errNote(block, inst_src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
// ?T to T
@@ -25146,7 +25223,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
- try sema.errNote(block, inst_src, msg, "consider using `.?`, `orelse`, or `if`", .{});
+ try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
}
try in_memory_result.report(sema, block, inst_src, msg);
@@ -26072,7 +26149,7 @@ fn coerceVarArgParam(
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
const target = sema.mod.getTarget();
- const double_bits = @import("type.zig").CType.sizeInBits(.double, target);
+ const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
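Aside: the net effect of the zirPtrCast changes on diagnostics (illustrative, not from the diff) is that a '@ptrCast' which discards qualifiers now carries a note pointing at the appropriate builtin instead of a bare error:

    const p: *const u8 = &byte;
    const q = @ptrCast(*u8, p);
    // error: cast discards const qualifier
    // note: consider using '@qualCast'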
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 6e096ee90a..cb28274f10 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -176,7 +176,9 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
- buf[i] = std.math.cast(u8, val.fieldValue(ty, i).toUnsignedInt(target)) orelse break :str;
+ const elem = val.fieldValue(ty, i);
+ if (elem.isUndef()) break :str;
+ buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
}
const truncated = if (len > max_string_len) " (truncated)" else "";
@@ -390,6 +392,7 @@ pub fn print(
while (i < max_len) : (i += 1) {
var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
+ if (elem_val.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
}
diff --git a/src/Zir.zig b/src/Zir.zig
index 94e6a9a11a..b93422177e 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -857,6 +857,9 @@ pub const Inst = struct {
/// Implements the `@ptrCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
ptr_cast,
+ /// Implements the `@qualCast` builtin.
+ /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
+ qual_cast,
/// Implements the `@truncate` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
truncate,
@@ -1195,6 +1198,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1484,6 +1488,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1755,6 +1760,7 @@ pub const Inst = struct {
.float_cast = .pl_node,
.int_cast = .pl_node,
.ptr_cast = .pl_node,
+ .qual_cast = .pl_node,
.truncate = .pl_node,
.align_cast = .pl_node,
.typeof_builtin = .pl_node,
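The new tag piggybacks on the existing `pl_node` encoding, so Sema can decode it exactly like the other two-argument casts. A sketch of that decoding, mirroring the patterns visible in the zirTruncate context earlier; the helper names follow the surrounding code but are not verified against this commit:

    fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
        const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
        const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
        // lhs is the destination type, rhs the operand, per the doc comment above.
        const dest_ty = try sema.resolveType(block, inst_data.src(), extra.lhs);
        const operand = try sema.resolveInst(extra.rhs);
        // ... qualifier-only compatibility check elided (see the Sema hunk earlier) ...
        try sema.requireRuntimeBlock(block, inst_data.src(), null);
        return block.addBitCast(dest_ty, operand);
    }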
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index b333ffc666..473a62fd83 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -24,7 +24,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const GenerateSymbolError = codegen.GenerateSymbolError;
-const FnResult = codegen.FnResult;
+const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig");
@@ -181,6 +181,7 @@ const DbgInfoReloc = struct {
else => unreachable,
}
}
+
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
switch (function.debug_output) {
.dwarf => |dw| {
@@ -202,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -254,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -349,7 +337,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -392,8 +380,8 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -406,8 +394,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -439,14 +427,14 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return FnResult{ .fail = emit.err_msg.? },
+ error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
- return FnResult{ .fail = em };
+ return Result{ .fail = em };
} else {
- return FnResult{ .appended = {} };
+ return Result.ok;
}
}
@@ -527,6 +515,28 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
+ for (self.args) |*arg, arg_index| {
+ // Copy register arguments to the stack
+ switch (arg.*) {
+ .register => |reg| {
+ // The first AIR instructions of the main body are guaranteed
+ // to be the function's arguments
+ // to be the function's arguments
+ const inst = self.air.getMainBody()[arg_index];
+ assert(self.air.instructions.items(.tag)[inst] == .arg);
+
+ const ty = self.air.typeOfIndex(inst);
+
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_align = ty.abiAlignment(self.target.*);
+ const stack_offset = try self.allocMem(abi_size, abi_align, inst);
+ try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+
+ arg.* = MCValue{ .stack_offset = stack_offset };
+ },
+ else => {},
+ }
+ }
+
_ = try self.addInst(.{
.tag = .dbg_prologue_end,
.data = .{ .nop = {} },
@@ -3996,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.sym_index,
- .coff => owner_decl.link.coff.sym_index,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -4163,45 +4179,19 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
self.arg_index += 1;
const ty = self.air.typeOfIndex(inst);
- const result = self.args[arg_index];
+ const tag = self.air.instructions.items(.tag)[inst];
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
- const mcv = switch (result) {
- // Copy registers to the stack
- .register => |reg| blk: {
- const mod = self.bin_file.options.module.?;
- const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
- return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
- };
- const abi_align = ty.abiAlignment(self.target.*);
- const stack_offset = try self.allocMem(abi_size, abi_align, inst);
- try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
-
- break :blk MCValue{ .stack_offset = stack_offset };
- },
- else => result,
- };
-
- const tag = self.air.instructions.items(.tag)[inst];
try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag,
.ty = ty,
.name = name,
- .mcv = result,
+ .mcv = self.args[arg_index],
});
- if (self.liveness.isUnused(inst))
- return self.finishAirBookkeeping();
-
- switch (mcv) {
- .register => |reg| {
- self.register_manager.getRegAssumeFree(reg, inst);
- },
- else => {},
- }
-
- return self.finishAir(inst, mcv, .{ .none, .none, .none });
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index];
+ return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airBreakpoint(self: *Self) !void {
@@ -4302,90 +4292,71 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// on linking.
const mod = self.bin_file.options.module.?;
if (self.air.value(callee)) |func_value| {
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- const got_addr = blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- };
+ if (func_value.castTag(.function)) |func_payload| {
+ const func = func_payload.data;
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
-
- _ = try self.addInst(.{
- .tag = .blr,
- .data = .{ .reg = .x30 },
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
+ try self.genSetReg(Type.initTag(.u64), .x30, .{
+ .linker_load = .{
+ .type = .got,
+ .sym_index = sym_index,
+ },
});
- } else if (func_value.castTag(.extern_fn)) |_| {
- return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
- }
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.macho.sym_index,
+ .sym_index = sym_index,
},
});
- // blr x30
- _ = try self.addInst(.{
- .tag = .blr,
- .data = .{ .reg = .x30 },
+ } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got_addr = p9.bases.data;
+ const got_index = decl_block.got_index.?;
+ const fn_got_addr = got_addr + got_index * ptr_bytes;
+ try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
+ } else unreachable;
+
+ _ = try self.addInst(.{
+ .tag = .blr,
+ .data = .{ .reg = .x30 },
+ });
+ } else if (func_value.castTag(.extern_fn)) |func_payload| {
+ const extern_fn = func_payload.data;
+ const decl_name = mod.declPtr(extern_fn.owner_decl).name;
+ if (extern_fn.lib_name) |lib_name| {
+ log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
+ decl_name,
+ lib_name,
});
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mod.declPtr(extern_fn.owner_decl).name;
- if (extern_fn.lib_name) |lib_name| {
- log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
- decl_name,
- lib_name,
- });
- }
- const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ }
+ if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
+ .atom_index = atom_index,
.sym_index = sym_index,
},
},
});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
- }
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- try self.genSetReg(Type.initTag(.u64), .x30, .{
- .linker_load = .{
- .type = .got,
- .sym_index = fn_owner_decl.link.coff.sym_index,
- },
- });
- // blr x30
- _ = try self.addInst(.{
- .tag = .blr,
- .data = .{ .reg = .x30 },
- });
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mod.declPtr(extern_fn.owner_decl).name;
- if (extern_fn.lib_name) |lib_name| {
- log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
- decl_name,
- lib_name,
- });
- }
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
@@ -4393,35 +4364,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- // blr x30
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else {
- return self.fail("TODO implement calling bitcasted functions", .{});
- }
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- if (func_value.castTag(.function)) |func_payload| {
- try p9.seeDecl(func_payload.data.owner_decl);
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = p9.bases.data;
- const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
- const fn_got_addr = got_addr + got_index * ptr_bytes;
-
- try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
-
- _ = try self.addInst(.{
- .tag = .blr,
- .data = .{ .reg = .x30 },
- });
- } else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
}
- } else unreachable;
+ } else {
+ return self.fail("TODO implement calling bitcasted functions", .{});
+ }
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
@@ -5534,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.sym_index,
- .coff => owner_decl.link.coff.sym_index,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5648,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct,
.import => .load_memory_import,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.sym_index,
- .coff => owner_decl.link.coff.sym_index,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5842,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.sym_index,
- .coff => owner_decl.link.coff.sym_index,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -6165,28 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- // Because MachO is PIE-always-on, we defer memory address resolution until
- // the linker has enough info to perform relocations.
- assert(decl.link.macho.sym_index != 0);
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.sym_index,
+ .sym_index = sym_index,
} };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- // Because COFF is PIE-always-on, we defer memory address resolution until
- // the linker has enough info to perform relocations.
- assert(decl.link.coff.sym_index != 0);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.sym_index,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6199,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
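Across this file the codegen entry point now returns `codegen.Result` instead of the function-specific `FnResult`, with the success case spelled as the bare `ok` tag rather than `.appended = {}`. The shape implied by the call sites (Result.ok, Result{ .fail = ... }) is roughly the following; the actual declaration lives in src/codegen.zig and is not reproduced in this excerpt:

    pub const Result = union(enum) {
        /// The generated bytes were appended to the caller's code buffer.
        ok,
        /// Codegen failed; the message is allocated and owned by the caller.
        fail: *Module.ErrorMsg,
    };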
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3812597789..3c2a81d5d1 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
@@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
@@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
@@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
},
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) {
.load_memory_got,
.load_memory_ptr_got,
@@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable,
};
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset,
.addend = 0,
@@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset + 4,
.addend = 0,
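The Emit changes swap atom pointers for atom indices (getAtomIndexForSymbol plus a free-function addRelocation taking the index). The likely motivation is that atoms live in a growable list, so a pointer captured before an append can dangle while an index stays valid. A self-contained illustration of that hazard:

    const std = @import("std");

    test "indices into a growing list stay valid, pointers may not" {
        var list = std.ArrayList(u32).init(std.testing.allocator);
        defer list.deinit();
        try list.append(42);
        const index: usize = 0; // safe to keep across growth
        // Keeping `&list.items[0]` here instead would be unsound: any of
        // the appends below may reallocate the backing buffer.
        var i: usize = 0;
        while (i < 1000) : (i += 1) try list.append(0);
        try std.testing.expectEqual(@as(u32, 42), list.items[index]);
    }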
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index fec844a867..57a8aed699 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -23,7 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const FnResult = codegen.FnResult;
+const Result = codegen.Result;
const GenerateSymbolError = codegen.GenerateSymbolError;
const DebugInfoOutput = codegen.DebugInfoOutput;
@@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -356,7 +343,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -399,8 +386,8 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -413,8 +400,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -446,14 +433,14 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return FnResult{ .fail = emit.err_msg.? },
+ error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
- return FnResult{ .fail = em };
+ return Result{ .fail = em };
} else {
- return FnResult{ .appended = {} };
+ return Result.ok;
}
}
@@ -4253,59 +4240,56 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- switch (self.bin_file.tag) {
- .elf => {
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else unreachable;
- try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
- } else if (func_value.castTag(.extern_fn)) |_| {
- return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
- }
+ if (self.air.value(callee)) |func_value| {
+ if (func_value.castTag(.function)) |func_payload| {
+ const func = func_payload.data;
+
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+ try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
+ } else if (self.bin_file.cast(link.File.MachO)) |_| {
+ unreachable; // unsupported architecture for MachO
} else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
-
- try self.genSetReg(Type.initTag(.usize), .lr, mcv);
- }
-
- // TODO: add Instruction.supportedOn
- // function for ARM
- if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
- _ = try self.addInst(.{
- .tag = .blx,
- .data = .{ .reg = .lr },
+ return self.fail("TODO implement call on {s} for {s}", .{
+ @tagName(self.bin_file.tag),
+ @tagName(self.target.cpu.arch),
});
- } else {
- return self.fail("TODO fix blx emulation for ARM <v5", .{});
- // _ = try self.addInst(.{
- // .tag = .mov,
- // .data = .{ .rr_op = .{
- // .rd = .lr,
- // .rn = .r0,
- // .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
- // } },
- // });
- // _ = try self.addInst(.{
- // .tag = .bx,
- // .data = .{ .reg = .lr },
- // });
}
- },
- .macho => unreachable, // unsupported architecture for MachO
- .coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}),
- .plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
- else => unreachable,
+ } else if (func_value.castTag(.extern_fn)) |_| {
+ return self.fail("TODO implement calling extern functions", .{});
+ } else {
+ return self.fail("TODO implement calling bitcasted functions", .{});
+ }
+ } else {
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+
+ try self.genSetReg(Type.initTag(.usize), .lr, mcv);
+ }
+
+ // TODO: add Instruction.supportedOn
+ // function for ARM
+ if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
+ _ = try self.addInst(.{
+ .tag = .blx,
+ .data = .{ .reg = .lr },
+ });
+ } else {
+ return self.fail("TODO fix blx emulation for ARM <v5", .{});
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .data = .{ .rr_op = .{
+ // .rd = .lr,
+ // .rn = .r0,
+ // .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
+ // } },
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .bx,
+ // .data = .{ .reg = .lr },
+ // });
}
const result: MCValue = result: {
@@ -6086,16 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6109,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
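The rewritten ARM call path now dispatches on the callee value first and only then on the output format, and it keeps the BLX emission behind a CPU-feature check, since BLX only exists from ARMv5T onward. The gate uses the standard feature-set query, as in this sketch:

    const std = @import("std");

    // BLX (branch with link and exchange) requires ARMv5T; older cores
    // need a mov lr, pc / bx sequence instead (still a TODO above).
    fn supportsBlx(target: std.Target) bool {
        return std.Target.arm.featureSetHas(target.cpu.features, .has_v5t);
    }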
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 2c63f171ad..8b8fca4859 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -22,7 +22,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
-const FnResult = @import("../../codegen.zig").FnResult;
+const Result = @import("../../codegen.zig").Result;
const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -225,7 +225,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -268,8 +268,8 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -282,8 +282,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -309,14 +309,14 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return FnResult{ .fail = emit.err_msg.? },
+ error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
- return FnResult{ .fail = em };
+ return Result{ .fail = em };
} else {
- return FnResult{ .appended = {} };
+ return Result.ok;
}
}
@@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
.stack_offset => {},
else => {},
},
@@ -1721,16 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
-
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- const got_addr = blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- };
-
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2557,18 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.sym_index };
+ unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 943d21c47b..418c67c580 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -20,7 +20,7 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
-const FnResult = @import("../../codegen.zig").FnResult;
+const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const build_options = @import("build_options");
@@ -265,7 +265,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -310,8 +310,8 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -324,8 +324,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -351,14 +351,14 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return FnResult{ .fail = emit.err_msg.? },
+ error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
- return FnResult{ .fail = em };
+ return Result{ .fail = em };
} else {
- return FnResult{ .appended = {} };
+ return Result.ok;
}
}
@@ -1216,12 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const mod = self.bin_file.options.module.?;
- break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes);
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -3414,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
else => {},
},
else => {},
@@ -4193,9 +4187,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.zigTypeTag() == .Pointer) blk: {
if (tv.ty.castPtrToFn()) |_| break :blk;
@@ -4209,9 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
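Every ELF backend in this commit replaces the open-coded got.p_vaddr + offset_table_index * ptr_bytes with atom.getOffsetTableAddress(elf_file). A plausible body for that helper, reconstructed from the code the diff deletes; the real implementation lives in src/link/Elf/Atom.zig and may differ in detail:

    pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
        const target = elf_file.base.options.target;
        const ptr_bytes = @divExact(target.cpu.arch.ptrBitWidth(), 8);
        const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
        return got.p_vaddr + self.offset_table_index * ptr_bytes;
    }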
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index c27639e14a..ec494b1a57 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -627,13 +627,6 @@ test "Wasm - buildOpcode" {
try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
}
-pub const Result = union(enum) {
- /// The codegen bytes have been appended to `Context.code`
- appended: void,
- /// The data is managed externally and are part of the `Result`
- externally_managed: []const u8,
-};
-
/// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue);
@@ -1171,7 +1164,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
-) codegen.GenerateSymbolError!codegen.FnResult {
+) codegen.GenerateSymbolError!codegen.Result {
_ = src_loc;
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
@@ -1190,18 +1183,18 @@ pub fn generate(
defer code_gen.deinit();
genFunc(&code_gen) catch |err| switch (err) {
- error.CodegenFail => return codegen.FnResult{ .fail = code_gen.err_msg },
+ error.CodegenFail => return codegen.Result{ .fail = code_gen.err_msg },
else => |e| return e,
};
- return codegen.FnResult{ .appended = {} };
+ return codegen.Result.ok;
}
fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ func.decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa);
@@ -1276,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
- .bin_file = &func.bin_file.base,
+ .bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
- .decl = func.decl,
+ .decl_index = func.decl_index,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@@ -1713,9 +1706,11 @@ fn isByRef(ty: Type, target: std.Target) bool {
return true;
},
.Optional => {
- if (ty.optionalReprIsPayload()) return false;
+ if (ty.isPtrLikeOptional()) return false;
var buf: Type.Payload.ElemType = undefined;
- return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime();
+ const pl_type = ty.optionalChild(&buf);
+ if (pl_type.zigTypeTag() == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime();
},
.Pointer => {
// Slices act like struct and will be passed by reference
@@ -2122,27 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
- const callee: ?*Decl = blk: {
+ const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
- break :blk module.declPtr(function.data.owner_decl);
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
+ break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom = func.bin_file.getAtomPtr(atom_index);
+ ext_decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
- ext_decl.link.wasm.sym_index,
+ atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
- ext_decl.fn_link.wasm.type_index,
+ ext_decl.fn_link.?.type_index,
);
- break :blk ext_decl;
+ break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
- break :blk module.declPtr(decl_ref.data);
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
+ break :blk decl_ref.data;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2163,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
- try func.addLabel(.call, direct.link.wasm.sym_index);
+ const atom_index = func.bin_file.decls.get(direct).?;
+ try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
@@ -2476,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
- try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
+ try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
},
@@ -2759,8 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
+ const atom = func.bin_file.getAtom(atom_index);
- const target_sym_index = decl.link.wasm.sym_index;
+ const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
@@ -3869,14 +3871,20 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try func.emitWValue(operand);
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
if (!optional_ty.optionalReprIsPayload()) {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
}
+ } else if (payload_ty.isSlice()) {
+ switch (func.arch()) {
+ .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
+ .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
+ else => unreachable,
+ }
}
// Compare the null value with '0'
@@ -5539,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{});
}
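In the isNull change above, a pointer-like optional whose payload is a slice is tested by loading just the slice's pointer word and comparing it with zero, with the load width chosen by address space. That dispatch in isolation:

    const std = @import("std");

    // Pick the load opcode for one pointer word of the current wasm target.
    fn pointerWordLoad(arch: std.Target.Cpu.Arch) std.wasm.Opcode {
        return switch (arch) {
            .wasm32 => .i32_load,
            .wasm64 => .i64_load,
            else => unreachable, // not a wasm target
        };
    }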
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 71d21d2797..a340ac5da8 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
-/// Reference to the file handler
-bin_file: *link.File,
+/// Reference to the Wasm module linker
+bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and
/// must be freed manually.
error_msg: ?*Module.ErrorMsg = null,
@@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals.
locals: []const u8,
/// The declaration that code is being generated for.
-decl: *Module.Decl,
+decl_index: Module.Decl.Index,
// Debug information
/// Holds the debug information for this emission
@@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
- // TODO: Determine the source location.
- emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
+ const mod = emit.bin_file.base.options.module.?;
+ emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail;
}
@@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
- // globals can have index 0 as it represents the stack pointer
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (label != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
- const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
+ const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
@@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (mem.pointer != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
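emitGlobal, emitCall, emitFunctionIndex, and emitMemAddress now all repeat the same three-step dance: look up the atom index for the decl, grab a mutable atom pointer, append a relocation. A hypothetical consolidation of that pattern (the helper name and the anytype shortcut for the wasm linker's relocation-type enum are mine, not this commit's):

    fn appendReloc(emit: *Emit, offset: u32, index: u32, relocation_type: anytype) !void {
        const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
        const atom = emit.bin_file.getAtomPtr(atom_index);
        try atom.relocs.append(emit.bin_file.base.allocator, .{
            .offset = offset,
            .index = index,
            .relocation_type = relocation_type,
        });
    }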
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 2ad31bf7ba..c11ea4e63e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -16,7 +16,7 @@ const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Module.ErrorMsg;
-const FnResult = codegen.FnResult;
+const Result = codegen.Result;
const GenerateSymbolError = codegen.GenerateSymbolError;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
@@ -257,7 +257,7 @@ pub fn generate(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -305,8 +305,8 @@ pub fn generate(
defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -319,8 +319,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
- error.OutOfRegisters => return FnResult{
+ error.CodegenFail => return Result{ .fail = function.err_msg.? },
+ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
},
else => |e| return e,
@@ -345,14 +345,14 @@ pub fn generate(
};
defer emit.deinit();
emit.lowerMir() catch |err| switch (err) {
- error.EmitFail => return FnResult{ .fail = emit.err_msg.? },
+ error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
- return FnResult{ .fail = em };
+ return Result{ .fail = em };
} else {
- return FnResult{ .appended = {} };
+ return Result.ok;
}
}
@@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) {
.linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
- const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
- fn_owner_decl.link.macho.sym_index
- else
- fn_owner_decl.link.coff.sym_index;
+ const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ } else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
},
else => unreachable, // not a valid function parameter
};
- try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
- try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -3992,49 +3993,26 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
const mod = self.bin_file.options.module.?;
- if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- const got_addr = blk: {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- };
+ if (self.air.value(callee)) |func_value| {
+ if (func_value.castTag(.function)) |func_payload| {
+ const func = func_payload.data;
+
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .imm = @truncate(u32, got_addr) },
+ .data = .{ .imm = got_addr },
});
- } else if (func_value.castTag(.extern_fn)) |_| {
- return self.fail("TODO implement calling extern functions", .{});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
- }
- } else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
- try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
- }
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.coff.sym_index,
+ .sym_index = sym_index,
},
});
_ = try self.addInst(.{
@@ -4045,19 +4023,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}),
.data = undefined,
});
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mod.declPtr(extern_fn.owner_decl).name;
- if (extern_fn.lib_name) |lib_name| {
- log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
- decl_name,
- lib_name,
- });
- }
- const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
- .type = .import,
+ .type = .got,
.sym_index = sym_index,
},
});
@@ -4069,35 +4040,38 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}),
.data = undefined,
});
- } else {
- return self.fail("TODO implement calling bitcasted functions", .{});
+ } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got_addr = p9.bases.data;
+ const got_index = decl_block.got_index.?;
+ const fn_got_addr = got_addr + got_index * ptr_bytes;
+ _ = try self.addInst(.{
+ .tag = .call,
+ .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
+ .data = .{ .imm = @intCast(u32, fn_got_addr) },
+ });
+ } else unreachable;
+ } else if (func_value.castTag(.extern_fn)) |func_payload| {
+ const extern_fn = func_payload.data;
+ const decl_name = mod.declPtr(extern_fn.owner_decl).name;
+ if (extern_fn.lib_name) |lib_name| {
+ log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
+ decl_name,
+ lib_name,
+ });
}
- } else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
- try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
- }
- } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- const sym_index = fn_owner_decl.link.macho.sym_index;
+
+ if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
- .type = .got,
+ .type = .import,
.sym_index = sym_index,
},
});
- // callq *%rax
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
@@ -4106,71 +4080,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}),
.data = undefined,
});
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mod.declPtr(extern_fn.owner_decl).name;
- if (extern_fn.lib_name) |lib_name| {
- log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
- decl_name,
- lib_name,
- });
- }
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
- .data = .{
- .relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
- .sym_index = sym_index,
- },
- },
+ .data = .{ .relocation = .{
+ .atom_index = atom_index,
+ .sym_index = sym_index,
+ } },
});
} else {
- return self.fail("TODO implement calling bitcasted functions", .{});
+ return self.fail("TODO implement calling extern functions", .{});
}
} else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
- try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ return self.fail("TODO implement calling bitcasted functions", .{});
}
- } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- if (self.air.value(callee)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- try p9.seeDecl(func_payload.data.owner_decl);
- const ptr_bits = self.target.cpu.arch.ptrBitWidth();
- const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = p9.bases.data;
- const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
- const fn_got_addr = got_addr + got_index * ptr_bytes;
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .imm = @intCast(u32, fn_got_addr) },
- });
- } else return self.fail("TODO implement calling extern fn on plan9", .{});
- } else {
- assert(ty.zigTypeTag() == .Pointer);
- const mcv = try self.resolveInst(callee);
- try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
- }
- } else unreachable;
+ } else {
+ assert(ty.zigTypeTag() == .Pointer);
+ const mcv = try self.resolveInst(callee);
+ try self.genSetReg(Type.initTag(.usize), .rax, mcv);
+ _ = try self.addInst(.{
+ .tag = .call,
+ .ops = Mir.Inst.Ops.encode(.{
+ .reg1 = .rax,
+ .flags = 0b01,
+ }),
+ .data = undefined,
+ });
+ }
if (info.stack_byte_count > 0) {
// Readjust the stack
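
The net effect of the hunks above is an inversion of `airCall`'s dispatch order: the callee is classified once (comptime-known function, extern function, or runtime pointer) and only then does the code branch on the linker backend, rather than re-classifying the callee inside every backend branch. A runnable toy of the resulting shape, with hypothetical `Callee`/`Backend` stand-ins for the real AIR value and `link.File` tags:

    const std = @import("std");

    const Callee = union(enum) {
        function: u32,
        extern_fn: []const u8,
        runtime: void,
    };
    const Backend = enum { elf, coff, macho, plan9 };

    // Classify the callee first, then pick a lowering per backend.
    fn lowerCall(callee: Callee, backend: Backend) []const u8 {
        switch (callee) {
            .function => return switch (backend) {
                .elf, .plan9 => "call with GOT address immediate",
                .coff, .macho => "genSetReg via linker_load .got, then call",
            },
            .extern_fn => return switch (backend) {
                .coff => "call via import table symbol",
                .macho => "call_extern relocation",
                else => "TODO implement calling extern functions",
            },
            .runtime => return "load pointer into %rax, call *%rax",
        }
    }

    pub fn main() void {
        std.debug.print("{s}\n", .{lowerCall(.{ .extern_fn = "puts" }, .macho)});
    }

One payoff visible in the diff itself: the extern-function and runtime-pointer paths, previously copy-pasted into every backend branch, now exist exactly once.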
@@ -6781,24 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
- const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
- } else if (self.bin_file.cast(link.File.MachO)) |_| {
- assert(decl.link.macho.sym_index != 0);
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
+ } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.sym_index,
+ .sym_index = sym_index,
} };
- } else if (self.bin_file.cast(link.File.Coff)) |_| {
- assert(decl.link.coff.sym_index != 0);
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.sym_index,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6811,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
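
Both `lowerDeclRef` and `lowerUnnamedConst` now follow the same pattern as the call lowering above: instead of reading a symbol index stored on the `Decl` (`decl.link.macho.sym_index` and friends), codegen asks the linker for the decl's atom, creating it on first use. A toy model of that lookup, with made-up types and an arbitrary symbol-numbering scheme:

    const std = @import("std");

    const Atom = struct { sym_index: u32 };

    const Linker = struct {
        atoms: std.ArrayList(Atom),
        decl_to_atom: std.AutoHashMap(u32, u32),

        // Lazily allocate the atom (and its symbol) on first request.
        fn getOrCreateAtomForDecl(self: *Linker, decl_index: u32) !u32 {
            const gop = try self.decl_to_atom.getOrPut(decl_index);
            if (!gop.found_existing) {
                gop.value_ptr.* = @intCast(u32, self.atoms.items.len);
                try self.atoms.append(.{ .sym_index = decl_index + 100 });
            }
            return gop.value_ptr.*;
        }

        fn getAtom(self: Linker, atom_index: u32) Atom {
            return self.atoms.items[atom_index];
        }
    };

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var linker = Linker{
            .atoms = std.ArrayList(Atom).init(gpa.allocator()),
            .decl_to_atom = std.AutoHashMap(u32, u32).init(gpa.allocator()),
        };
        defer linker.atoms.deinit();
        defer linker.decl_to_atom.deinit();

        const atom_index = try linker.getOrCreateAtomForDecl(7);
        std.debug.print("sym_index = {d}\n", .{linker.getAtom(atom_index).sym_index});
    }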
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index af3ed5e053..c4f9b4eb42 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(macho_file, .{
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
@@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(coff_file, .{
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
@@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
@@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,
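
The Emit.zig hunks repeat one mechanical migration: `addRelocation` is now a namespaced function taking the linker file plus an `Atom.Index`, not a method on a `*Atom`. A plausible motivation (an assumption here, but consistent with atoms moving into array storage elsewhere in this commit) is pointer stability: an index into a growable list stays valid across reallocation, while a pointer does not. A tiny demonstration:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var atoms = std.ArrayList(u32).init(gpa.allocator());
        defer atoms.deinit();

        try atoms.append(42);
        const index: usize = 0; // stays valid no matter how the list grows
        // A pointer like `&atoms.items[0]` would be invalidated below.

        var i: u32 = 0;
        while (i < 1000) : (i += 1) try atoms.append(i);

        std.debug.print("atoms[{d}] = {d}\n", .{ index, atoms.items[index] });
    }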
diff --git a/src/codegen.zig b/src/codegen.zig
index e8dd661684..c0a04765b0 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -21,16 +21,11 @@ const TypedValue = @import("TypedValue.zig");
const Value = @import("value.zig").Value;
const Zir = @import("Zir.zig");
-pub const FnResult = union(enum) {
- /// The `code` parameter passed to `generateSymbol` has the value appended.
- appended: void,
- fail: *ErrorMsg,
-};
pub const Result = union(enum) {
- /// The `code` parameter passed to `generateSymbol` has the value appended.
- appended: void,
- /// The value is available externally, `code` is unused.
- externally_managed: []const u8,
+    /// The `code` parameter passed to `generateSymbol` has the value appended.
+ ok: void,
+
+ /// There was a codegen error.
fail: *ErrorMsg,
};
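
With `externally_managed` gone, success carries no payload and every caller can share a single switch shape. A minimal standalone sketch of the merged type (toy code; `[]const u8` stands in for `*ErrorMsg`):

    const std = @import("std");

    const Result = union(enum) {
        ok: void,
        fail: []const u8,
    };

    fn generate(good: bool) Result {
        return if (good) Result.ok else Result{ .fail = "ran out of registers" };
    }

    pub fn main() void {
        switch (generate(true)) {
            .ok => std.debug.print("value appended to code buffer\n", .{}),
            .fail => |msg| std.debug.print("codegen error: {s}\n", .{msg}),
        }
    }

The dozens of hunks below are the mechanical fallout: every `.appended => {}` / `.externally_managed => ...` pair collapses to a single `.ok => {}` prong.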
@@ -89,7 +84,7 @@ pub fn generateFunction(
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
-) GenerateSymbolError!FnResult {
+) GenerateSymbolError!Result {
switch (bin_file.options.target.cpu.arch) {
.arm,
.armeb,
@@ -145,7 +140,7 @@ pub fn generateSymbol(
if (typed_value.val.isUndefDeep()) {
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
- return Result{ .appended = {} };
+ return Result.ok;
}
switch (typed_value.ty.zigTypeTag()) {
@@ -176,7 +171,7 @@ pub fn generateSymbol(
128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)),
else => unreachable,
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Array => switch (typed_value.val.tag()) {
.bytes => {
@@ -185,7 +180,7 @@ pub fn generateSymbol(
// The bytes payload already includes the sentinel, if any
try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]);
- return Result{ .appended = {} };
+ return Result.ok;
},
.str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data;
@@ -197,7 +192,7 @@ pub fn generateSymbol(
const byte = @intCast(u8, sent_val.toUnsignedInt(target));
code.appendAssumeCapacity(byte);
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data;
@@ -208,14 +203,11 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = elem_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.repeated => {
const array = typed_value.val.castTag(.repeated).?.data;
@@ -229,10 +221,7 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = array,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
@@ -242,15 +231,12 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = sentinel_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.empty_array_sentinel => {
const elem_ty = typed_value.ty.childType();
@@ -259,13 +245,10 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = sentinel_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- return Result{ .appended = {} };
+ return Result.ok;
},
else => return Result{
.fail = try ErrorMsg.create(
@@ -289,7 +272,7 @@ pub fn generateSymbol(
},
else => unreachable,
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.variable => {
const decl = typed_value.val.castTag(.variable).?.data.owner_decl;
@@ -309,10 +292,7 @@ pub fn generateSymbol(
.ty = slice_ptr_field_type,
.val = slice.ptr,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -321,14 +301,11 @@ pub fn generateSymbol(
.ty = Type.initTag(.usize),
.val = slice.len,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.field_ptr => {
const field_ptr = typed_value.val.castTag(.field_ptr).?.data;
@@ -375,13 +352,10 @@ pub fn generateSymbol(
.ty = typed_value.ty,
.val = container_ptr,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- return Result{ .appended = {} };
+ return Result.ok;
},
else => return Result{
.fail = try ErrorMsg.create(
@@ -434,7 +408,7 @@ pub fn generateSymbol(
.signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))),
};
try code.append(x);
- return Result{ .appended = {} };
+ return Result.ok;
}
if (info.bits > 64) {
var bigint_buffer: Value.BigIntSpace = undefined;
@@ -443,7 +417,7 @@ pub fn generateSymbol(
const start = code.items.len;
try code.resize(start + abi_size);
bigint.writeTwosComplement(code.items[start..][0..abi_size], endian);
- return Result{ .appended = {} };
+ return Result.ok;
}
switch (info.signedness) {
.unsigned => {
@@ -471,7 +445,7 @@ pub fn generateSymbol(
}
},
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Enum => {
var int_buffer: Value.Payload.U64 = undefined;
@@ -481,7 +455,7 @@ pub fn generateSymbol(
if (info.bits <= 8) {
const x = @intCast(u8, int_val.toUnsignedInt(target));
try code.append(x);
- return Result{ .appended = {} };
+ return Result.ok;
}
if (info.bits > 64) {
return Result{
@@ -519,12 +493,12 @@ pub fn generateSymbol(
}
},
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Bool => {
const x: u8 = @boolToInt(typed_value.val.toBool());
try code.append(x);
- return Result{ .appended = {} };
+ return Result.ok;
},
.Struct => {
if (typed_value.ty.containerLayout() == .Packed) {
@@ -549,12 +523,7 @@ pub fn generateSymbol(
.ty = field_ty,
.val = field_val,
}, &tmp_list, debug_output, reloc_info)) {
- .appended => {
- mem.copy(u8, code.items[current_pos..], tmp_list.items);
- },
- .externally_managed => |external_slice| {
- mem.copy(u8, code.items[current_pos..], external_slice);
- },
+ .ok => mem.copy(u8, code.items[current_pos..], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
@@ -563,7 +532,7 @@ pub fn generateSymbol(
bits += @intCast(u16, field_ty.bitSize(target));
}
- return Result{ .appended = {} };
+ return Result.ok;
}
const struct_begin = code.items.len;
@@ -576,10 +545,7 @@ pub fn generateSymbol(
.ty = field_ty,
.val = field_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;
@@ -593,7 +559,7 @@ pub fn generateSymbol(
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Union => {
const union_obj = typed_value.val.castTag(.@"union").?.data;
@@ -612,10 +578,7 @@ pub fn generateSymbol(
.ty = typed_value.ty.unionTagType().?,
.val = union_obj.tag,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
@@ -632,10 +595,7 @@ pub fn generateSymbol(
.ty = field_ty,
.val = union_obj.val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -650,15 +610,12 @@ pub fn generateSymbol(
.ty = union_ty.tag_ty,
.val = union_obj.tag,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
@@ -669,7 +626,7 @@ pub fn generateSymbol(
if (!payload_type.hasRuntimeBits()) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
- return Result{ .appended = {} };
+ return Result.ok;
}
if (typed_value.ty.optionalReprIsPayload()) {
@@ -678,10 +635,7 @@ pub fn generateSymbol(
.ty = payload_type,
.val = payload.data,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
} else if (!typed_value.val.isNull()) {
@@ -689,17 +643,14 @@ pub fn generateSymbol(
.ty = payload_type,
.val = typed_value.val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
} else {
try code.writer().writeByteNTimes(0, abi_size);
}
- return Result{ .appended = {} };
+ return Result.ok;
}
const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef);
@@ -708,14 +659,11 @@ pub fn generateSymbol(
.ty = payload_type,
.val = value,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.ErrorUnion => {
const error_ty = typed_value.ty.errorUnionSet();
@@ -740,10 +688,7 @@ pub fn generateSymbol(
.ty = error_ty,
.val = if (is_payload) Value.initTag(.zero) else typed_value.val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
@@ -756,10 +701,7 @@ pub fn generateSymbol(
.ty = payload_ty,
.val = payload_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_end = code.items.len - begin;
@@ -778,10 +720,7 @@ pub fn generateSymbol(
.ty = error_ty,
.val = if (is_payload) Value.initTag(.zero) else typed_value.val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_end = code.items.len - begin;
@@ -793,7 +732,7 @@ pub fn generateSymbol(
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.ErrorSet => {
switch (typed_value.val.tag()) {
@@ -806,7 +745,7 @@ pub fn generateSymbol(
try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target)));
},
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.Vector => switch (typed_value.val.tag()) {
.bytes => {
@@ -814,7 +753,7 @@ pub fn generateSymbol(
const len = @intCast(usize, typed_value.ty.arrayLen());
try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]);
- return Result{ .appended = {} };
+ return Result.ok;
},
.aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data;
@@ -825,14 +764,11 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = elem_val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.repeated => {
const array = typed_value.val.castTag(.repeated).?.data;
@@ -845,14 +781,11 @@ pub fn generateSymbol(
.ty = elem_ty,
.val = array,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |slice| {
- code.appendSliceAssumeCapacity(slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- return Result{ .appended = {} };
+ return Result.ok;
},
.str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data;
@@ -860,7 +793,7 @@ pub fn generateSymbol(
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try code.ensureUnusedCapacity(str_lit.len);
code.appendSliceAssumeCapacity(bytes);
- return Result{ .appended = {} };
+ return Result.ok;
},
else => unreachable,
},
@@ -901,10 +834,7 @@ fn lowerDeclRef(
.ty = slice_ptr_field_type,
.val = typed_value.val,
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
@@ -917,14 +847,11 @@ fn lowerDeclRef(
.ty = Type.usize,
.val = Value.initPayload(&slice_len.base),
}, code, debug_output, reloc_info)) {
- .appended => {},
- .externally_managed => |external_slice| {
- code.appendSliceAssumeCapacity(external_slice);
- },
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- return Result{ .appended = {} };
+ return Result.ok;
}
const ptr_width = target.cpu.arch.ptrBitWidth();
@@ -932,7 +859,7 @@ fn lowerDeclRef(
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
- return Result{ .appended = {} };
+ return Result.ok;
}
module.markDeclAlive(decl);
@@ -950,7 +877,7 @@ fn lowerDeclRef(
else => unreachable,
}
- return Result{ .appended = {} };
+ return Result.ok;
}
pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 68c20ea49c..2f721e1b4b 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -16,7 +16,6 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const CType = @import("../type.zig").CType;
const target_util = @import("../target.zig");
const libcFloatPrefix = target_util.libcFloatPrefix;
@@ -1663,6 +1662,22 @@ pub const DeclGen = struct {
defer buffer.deinit();
try buffer.appendSlice("struct ");
+
+ var needs_pack_attr = false;
+ {
+ var it = t.structFields().iterator();
+ while (it.next()) |field| {
+ const field_ty = field.value_ptr.ty;
+ if (!field_ty.hasRuntimeBits()) continue;
+ const alignment = field.value_ptr.abi_align;
+ if (alignment != 0 and alignment < field_ty.abiAlignment(dg.module.getTarget())) {
+ needs_pack_attr = true;
+ try buffer.appendSlice("zig_packed(");
+ break;
+ }
+ }
+ }
+
try buffer.appendSlice(name);
try buffer.appendSlice(" {\n");
{
@@ -1672,7 +1687,7 @@ pub const DeclGen = struct {
const field_ty = field.value_ptr.ty;
if (!field_ty.hasRuntimeBits()) continue;
- const alignment = field.value_ptr.abi_align;
+ const alignment = field.value_ptr.alignment(dg.module.getTarget(), t.containerLayout());
const field_name = CValue{ .identifier = field.key_ptr.* };
try buffer.append(' ');
try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete);
@@ -1682,7 +1697,7 @@ pub const DeclGen = struct {
}
if (empty) try buffer.appendSlice(" char empty_struct;\n");
}
- try buffer.appendSlice("};\n");
+ if (needs_pack_attr) try buffer.appendSlice("});\n") else try buffer.appendSlice("};\n");
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@@ -2367,8 +2382,13 @@ pub const DeclGen = struct {
depth += 1;
}
- if (alignment != 0 and alignment > ty.abiAlignment(target)) {
- try w.print("zig_align({}) ", .{alignment});
+ if (alignment != 0) {
+ const abi_alignment = ty.abiAlignment(target);
+ if (alignment < abi_alignment) {
+ try w.print("zig_under_align({}) ", .{alignment});
+ } else if (alignment > abi_alignment) {
+ try w.print("zig_align({}) ", .{alignment});
+ }
}
try dg.renderType(w, render_ty, kind);
@@ -2860,27 +2880,30 @@ pub fn genDecl(o: *Object) !void {
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
+ if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
+ if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
+ const is_global = o.dg.module.decl_exports.contains(o.dg.decl_index);
+ const fwd_decl_writer = o.dg.fwd_decl.writer();
const decl_c_value: CValue = .{ .decl = o.dg.decl_index };
- const fwd_decl_writer = o.dg.fwd_decl.writer();
- try fwd_decl_writer.writeAll("static ");
- try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
+ try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
+ try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
try fwd_decl_writer.writeAll(";\n");
- const writer = o.writer();
- try writer.writeAll("static ");
- // TODO ask the Decl if it is const
- // https://github.com/ziglang/zig/issues/7582
- try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
- try writer.writeAll(" = ");
- try o.dg.renderValue(writer, tv.ty, tv.val, .StaticInitializer);
- try writer.writeAll(";\n");
+ const w = o.writer();
+ if (!is_global) try w.writeAll("static ");
+ if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
+ try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
+ if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)");
+ try w.writeAll(" = ");
+ try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
+ try w.writeAll(";\n");
}
}
@@ -3726,16 +3749,15 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.air.typeOf(bin_op.rhs);
- const src_val = try f.resolveInst(bin_op.rhs);
-
- try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
// TODO Sema should emit a different instruction when the store should
// possibly do the safety 0xaa bytes for undefined.
const src_val_is_undefined =
if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
- if (src_val_is_undefined)
+ if (src_val_is_undefined) {
+ try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return try storeUndefined(f, ptr_info.pointee_type, ptr_val);
+ }
const target = f.object.dg.module.getTarget();
const is_aligned = ptr_info.@"align" == 0 or
@@ -3744,6 +3766,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer();
+ const src_val = try f.resolveInst(bin_op.rhs);
+ try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
+
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
@@ -4344,8 +4369,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand = try f.resolveInst(pl_op.operand);
- _ = operand;
+ const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false;
+ if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
+
try reap(f, inst, &.{pl_op.operand});
const writer = f.object.writer();
try writer.print("/* var:{s} */\n", .{name});
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e53ebe5450..6433191221 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -19,7 +19,6 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc;
-const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@@ -11057,8 +11056,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target),
- .f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target),
- .f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target),
+ .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
+ .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true,
};
}
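
The compiler-internal `CType` helper is replaced by the equivalent query on `std.Target` itself. The call used above works on any `std.Target` value; a minimal probe of the native target:

    const std = @import("std");

    pub fn main() void {
        const target = @import("builtin").target;
        std.debug.print("long double: {d} bits\n", .{
            target.c_type_bit_size(.longdouble),
        });
    }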
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e1af8c847f..c5a3d57d07 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
- decl: *Decl,
+ decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
@@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
- pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
+ pub fn init(
+ allocator: Allocator,
+ module: *Module,
+ spv: *SpvModule,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+ ) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
- .decl = undefined,
+ .decl_index = undefined,
.air = undefined,
.liveness = undefined,
+ .ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@@ -150,9 +158,9 @@ pub const DeclGen = struct {
    /// Generate the code for `decl`. If a reportable error occurs during code generation,
    /// a message is returned by this function; the caller owns the memory. If this function
    /// returns such a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
+ pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
- self.decl = decl;
+ self.decl_index = decl_index;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(self.decl);
+ const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@@ -332,7 +340,7 @@ pub const DeclGen = struct {
};
const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl);
- return decl.fn_link.spirv.id.toRef();
+ return self.ids.get(fn_decl_index).?.toRef();
}
const target = self.getTarget();
@@ -553,8 +561,8 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.decl;
- const result_id = decl.fn_link.spirv.id;
+ const result_id = self.ids.get(self.decl_index).?;
+ const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
@@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
+ const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
- const src_loc = loc.toSrcLoc(self.decl);
+ const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
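
The structural change in this file: `DeclGen` stops caching a `*Decl` and stops reading the result id off the decl itself (`decl.fn_link.spirv.id`); it now holds a `Decl.Index` plus a const pointer to an id map owned by the SPIR-V linker. A toy of that ownership split:

    const std = @import("std");

    const IdResult = u32;
    const DeclIndex = u32;

    const DeclGen = struct {
        // Borrowed, read-only view of linker-owned state.
        ids: *const std.AutoHashMap(DeclIndex, IdResult),

        fn resolveDecl(self: DeclGen, decl_index: DeclIndex) IdResult {
            return self.ids.get(decl_index).?;
        }
    };

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var ids = std.AutoHashMap(DeclIndex, IdResult).init(gpa.allocator());
        defer ids.deinit();
        try ids.put(3, 17);

        const dg = DeclGen{ .ids = &ids };
        std.debug.print("result id = {d}\n", .{dg.resolveDecl(3)});
    }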
diff --git a/src/link.zig b/src/link.zig
index 976debb72b..2b3ce51667 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
- pub const LinkBlock = union {
- elf: Elf.TextBlock,
- coff: Coff.Atom,
- macho: MachO.Atom,
- plan9: Plan9.DeclBlock,
- c: void,
- wasm: Wasm.DeclBlock,
- spirv: void,
- nvptx: void,
- };
-
- pub const LinkFn = union {
- elf: Dwarf.SrcFn,
- coff: Coff.SrcFn,
- macho: Dwarf.SrcFn,
- plan9: void,
- c: void,
- wasm: Wasm.FnData,
- spirv: SpirV.FnData,
- nvptx: void,
- };
-
- pub const Export = union {
- elf: Elf.Export,
- coff: Coff.Export,
- macho: MachO.Export,
- plan9: Plan9.Export,
- c: void,
- wasm: Wasm.Export,
- spirv: void,
- nvptx: void,
- };
-
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@@ -533,8 +500,7 @@ pub const File = struct {
}
}
- /// May be called before or after updateDeclExports but must be called
- /// after allocateDeclIndexes for any given Decl.
+ /// May be called before or after updateDeclExports for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() });
@@ -557,8 +523,7 @@ pub const File = struct {
}
}
- /// May be called before or after updateDeclExports but must be called
- /// after allocateDeclIndexes for any given Decl.
+ /// May be called before or after updateDeclExports for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{
@@ -582,48 +547,27 @@ pub const File = struct {
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
+ const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
- return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
+ return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
- .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
- .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
- .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
- .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
+ .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
+ .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}
- /// Must be called before any call to updateDecl or updateDeclExports for
- /// any given Decl.
- /// TODO we're transitioning to deleting this function and instead having
- /// each linker backend notice the first time updateDecl or updateFunc is called, or
- /// a callee referenced from AIR.
- pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void {
- const decl = base.options.module.?.declPtr(decl_index);
- log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
- if (build_options.only_c) {
- assert(base.tag == .c);
- return;
- }
- switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index),
- .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index),
- .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl_index),
- .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index),
- .c, .spirv, .nvptx => {},
- }
- }
-
pub fn releaseLock(self: *File) void {
if (self.lock) |*lock| {
lock.release();
@@ -874,8 +818,7 @@ pub const File = struct {
AnalysisFail,
};
- /// May be called before or after updateDecl, but must be called after
- /// allocateDeclIndexes for any given Decl.
+ /// May be called before or after updateDecl for any given Decl.
pub fn updateDeclExports(
base: *File,
module: *Module,
@@ -911,6 +854,8 @@ pub const File = struct {
    /// The linker is passed information about the containing atom, `parent_atom_index`, and offset within its
    /// memory buffer, `offset`, so that it can make a note of potential relocation sites, in case the
    /// `Decl`'s address is not yet resolved, or the containing atom gets moved in virtual memory.
+    /// May be called before or after updateFunc/updateDecl, so it is up to the linker to allocate
+ /// the block/atom.
pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 {
if (build_options.only_c) unreachable;
switch (base.tag) {
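
With `allocateDeclIndexes` removed above, the frontend-facing contract shrinks to the update calls themselves; each backend now allocates its atom/block on the first `updateDecl`/`updateFunc` (or from `getDeclVAddr`, per the new doc comment). In caller terms, roughly (hypothetical handle `lf`, comment sketch only):

    // before: pre-registration was mandatory
    //   try lf.allocateDeclIndexes(decl_index);
    //   try lf.updateDecl(module, decl_index);
    //
    // after: the first update allocates on demand
    //   try lf.updateDecl(module, decl_index);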
diff --git a/src/link/C.zig b/src/link/C.zig
index 8b05b8b22d..02e5cadfbc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}
-pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
}
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 9c05114a1f..2922e783e1 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};
-const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
-const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
+const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u16,
+ /// A list of all exports aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
};
pub const PtrWidth = enum {
@@ -168,11 +195,6 @@ pub const PtrWidth = enum {
};
}
};
-pub const SrcFn = void;
-
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
@@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
-
+ self.atoms.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
@@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa);
- self.decls.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
+
self.atom_by_index_table.deinit(gpa);
{
@@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ if (maybe_last_atom_index) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
@@ -480,24 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
-pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void {
- if (self.llvm_object) |_| return;
- const decl = self.base.options.module.?.declPtr(decl_index);
- if (decl.link.coff.sym_index != 0) return;
- decl.link.coff.sym_index = try self.allocateSymbol();
- const gpa = self.base.allocator;
- try self.atom_by_index_table.putNoClobber(gpa, decl.link.coff.sym_index, &decl.link.coff);
- try self.decls.putNoClobber(gpa, decl_index, null);
-}
-
-fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
// We use these to indicate our intention to update metadata, placing the new atom,
@@ -505,7 +523,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -513,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -541,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0;
- log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ self.getSectionName(header),
+ header.pointer_to_raw_data,
+ new_offset,
+ });
const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data,
self.base.file.?,
@@ -587,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
}
- atom.size = new_atom_size;
- atom.alignment = alignment;
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = alignment;
+ }
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -615,7 +651,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
return vaddr;
}
-fn allocateSymbol(self: *Coff) !u32 {
+pub fn allocateSymbol(self: *Coff) !u32 {
const gpa = self.base.allocator;
try self.locals.ensureUnusedCapacity(gpa, 1);
@@ -711,25 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index;
}
-fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- atom.sym_index = try self.allocateSymbol();
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
+
+fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = 0,
@@ -743,67 +791,67 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {},
.DEBUG => unreachable, // not possible
- else => try atom.addBaseRelocation(self, 0),
+ else => try Atom.addBaseRelocation(self, atom_index, 0),
}
- return atom;
+ return atom_index;
}
-fn createImportAtom(self: *Coff) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- atom.sym_index = try self.allocateSymbol();
+fn createImportAtom(self: *Coff) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value});
- return atom;
+ return atom_index;
}
-fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
-fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void {
+fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
+fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
- log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{ atom.getName(self), file_offset, file_offset + code.len });
+ log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{
+ atom.getName(self),
+ file_offset,
+ file_offset + code.len,
+ });
try self.base.file.?.pwriteAll(code, file_offset);
- try self.resolveRelocs(atom);
+ try self.resolveRelocs(atom_index);
}
-fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
switch (self.ptr_width) {
.p32 => {
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
.p64 => {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
}
}
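
`writeAtom` above converts a symbol's virtual address into an on-disk position before calling `pwriteAll`. The same arithmetic as a standalone, hedged sketch:

const std = @import("std");

/// Maps a virtual address inside a section to its on-disk position:
/// bytes before `virtual_address` do not exist in the file, so the
/// distance into the section is added to `pointer_to_raw_data`.
fn fileOffset(pointer_to_raw_data: u32, virtual_address: u32, vaddr: u32) u32 {
    std.debug.assert(vaddr >= virtual_address);
    return pointer_to_raw_data + (vaddr - virtual_address);
}

test "vaddr to file offset" {
    // Illustrative numbers: section raw data at 0x400, mapped at RVA 0x1000.
    try std.testing.expectEqual(@as(u32, 0x450), fileOffset(0x400, 0x1000, 0x1050));
}
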
@@ -823,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue;
reloc.dirty = true;
@@ -831,23 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
-fn resolveRelocs(self: *Coff, atom: *Atom) !void {
- const relocs = self.relocs.get(atom) orelse return;
+fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
+ const relocs = self.relocs.get(atom_index) orelse return;
- log.debug("relocating '{s}'", .{atom.getName(self)});
+ log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(atom, self);
+ try reloc.resolve(atom_index, self);
}
}
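
`resolveRelocs` works together with `markRelocsDirtyByAddress`/`markRelocsDirtyByTarget`: moving an atom only flips the `dirty` bit on affected edges, and the next write re-resolves just those. The invalidate-then-flush pattern, reduced to a sketch with illustrative types:

const std = @import("std");

const Reloc = struct {
    target_vaddr: u32,
    dirty: bool = true,

    fn resolve(self: *Reloc) void {
        // The real linker patches instruction bytes here; the sketch only clears the flag.
        self.dirty = false;
    }
};

fn markDirtyAtOrAbove(relocs: []Reloc, addr: u32) void {
    for (relocs) |*reloc| {
        if (reloc.target_vaddr >= addr) reloc.dirty = true;
    }
}

fn flush(relocs: []Reloc) void {
    for (relocs) |*reloc| {
        if (!reloc.dirty) continue; // already up to date
        reloc.resolve();
    }
}

test "only invalidated edges are re-resolved" {
    var relocs = [_]Reloc{ .{ .target_vaddr = 0x100 }, .{ .target_vaddr = 0x200 } };
    flush(&relocs);
    markDirtyAtOrAbove(&relocs, 0x180); // e.g. everything at or past 0x180 moved
    try std.testing.expect(!relocs[0].dirty and relocs[1].dirty);
}
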
-fn freeAtom(self: *Coff, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
+
+ const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@@ -856,46 +908,69 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
+ }
+
+ // Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
+ const sym_index = atom.getSymbolIndex().?;
+ self.locals_free_list.append(gpa, sym_index) catch {};
+
+ // Try freeing GOT atom if this decl had one
+ const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ if (self.got_entries_table.get(got_target)) |got_index| {
+ self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
+ self.got_entries.items[got_index] = .{
+ .target = .{ .sym_index = 0, .file = null },
+ .sym_index = 0,
+ };
+ _ = self.got_entries_table.remove(got_target);
+
+ log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
+
+ self.locals.items[sym_index].section_number = .UNDEFINED;
+ _ = self.atom_by_index_table.remove(sym_index);
+ log.debug(" adding local symbol index {d} to free list", .{sym_index});
+ self.getAtomPtr(atom_index).sym_index = 0;
}
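
`freeAtom` now returns the atom's symbol index to `locals_free_list`, which `allocateSymbol` (elided above) can recycle. A minimal sketch of the reuse-or-grow allocation this enables; the type and field names are illustrative:

const std = @import("std");

const SymbolAllocator = struct {
    locals: std.ArrayListUnmanaged(u64) = .{},
    locals_free_list: std.ArrayListUnmanaged(u32) = .{},

    fn allocateSymbol(self: *SymbolAllocator, gpa: std.mem.Allocator) !u32 {
        // Prefer a previously freed slot; otherwise grow the table.
        if (self.locals_free_list.popOrNull()) |index| return index;
        const index = @intCast(u32, self.locals.items.len);
        try self.locals.append(gpa, 0);
        return index;
    }

    fn freeSymbol(self: *SymbolAllocator, gpa: std.mem.Allocator, index: u32) void {
        // Appending to the free list may fail; it is only a heuristic,
        // so OOM here is deliberately ignored (matching the linker code).
        self.locals_free_list.append(gpa, index) catch {};
    }
};

test "freed symbol indices are recycled" {
    const gpa = std.testing.allocator;
    var sa = SymbolAllocator{};
    defer sa.locals.deinit(gpa);
    defer sa.locals_free_list.deinit(gpa);
    const a = try sa.allocateSymbol(gpa);
    sa.freeSymbol(gpa, a);
    try std.testing.expectEqual(a, try sa.allocateSymbol(gpa));
}
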
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -912,8 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(&decl.link.coff);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -928,7 +1005,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
.none,
);
const code = switch (res) {
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -957,12 +1034,8 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}
const unnamed_consts = gop.value_ptr;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
+ const atom_index = try self.createAtom();
- atom.sym_index = try self.allocateSymbol();
- const sym = atom.getSymbolPtr(self);
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
@@ -971,18 +1044,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
- try self.setSymbolName(sym, sym_name);
- sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
-
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
+ {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbolPtr(self);
+ try self.setSymbolName(sym, sym_name);
+ sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ }
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
- .parent_atom_index = atom.sym_index,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
@@ -992,19 +1065,20 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
- errdefer self.freeAtom(atom);
+ atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
- log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, sym.value });
+ log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom.sym_index;
+ return atom.getSymbolIndex().?;
}
pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1029,7 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
- self.freeRelocationsForAtom(&decl.link.coff);
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -1039,11 +1115,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.coff.sym_index,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -1058,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
+pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
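
`getOrCreateAtomForDecl` replaces the old requirement that callers run `allocateDeclIndexes` first with lazy creation on first use. The underlying `getOrPut` idiom, reduced to a sketch with stand-in types:

const std = @import("std");

const Metadata = struct { atom: u32 };

fn getOrCreate(
    gpa: std.mem.Allocator,
    map: *std.AutoHashMapUnmanaged(u32, Metadata),
    key: u32,
) !u32 {
    // One hash lookup covers both the hit and the miss path:
    // on a miss, `value_ptr` points at uninitialized storage we must fill.
    const gop = try map.getOrPut(gpa, key);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{ .atom = key * 10 }; // stand-in for createAtom()
    }
    return gop.value_ptr.atom;
}

test "second call returns the existing entry" {
    const gpa = std.testing.allocator;
    var map: std.AutoHashMapUnmanaged(u32, Metadata) = .{};
    defer map.deinit(gpa);
    try std.testing.expectEqual(try getOrCreate(gpa, &map, 7), try getOrCreate(gpa, &map, 7));
}
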
+fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@@ -1093,15 +1181,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_index = decl_ptr.*.?;
-
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
- const atom = &decl.link.coff;
- assert(atom.sym_index != 0); // Caller forgot to allocateDeclIndexes()
+
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
@@ -1111,62 +1196,51 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
if (vaddr != sym.value) {
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
- const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
+ self.shrinkAtom(atom_index, code_len);
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
- const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
+ const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
- self.got_entries.items[got_index].sym_index = got_atom.sym_index;
- try self.writePtrWidthAtom(got_atom);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
+ self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
-}
-
-fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_base_relocs = self.base_relocs.fetchRemove(atom);
- if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
+ try self.writeAtom(atom_index, code);
}
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator;
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
- for (unnamed_consts.items) |atom| {
- self.freeAtom(atom);
- self.locals_free_list.append(gpa, atom.sym_index) catch {};
- self.locals.items[atom.sym_index].section_number = .UNDEFINED;
- _ = self.atom_by_index_table.remove(atom.sym_index);
- log.debug(" adding local symbol index {d} to free list", .{atom.sym_index});
- atom.sym_index = 0;
+ for (unnamed_consts.items) |atom_index| {
+ self.freeAtom(atom_index);
}
unnamed_consts.clearAndFree(gpa);
}
@@ -1181,35 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- const kv = self.decls.fetchRemove(decl_index);
- if (kv.?.value) |_| {
- self.freeAtom(&decl.link.coff);
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index);
- }
-
- // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- const gpa = self.base.allocator;
- const sym_index = decl.link.coff.sym_index;
- if (sym_index != 0) {
- self.locals_free_list.append(gpa, sym_index) catch {};
-
- // Try freeing GOT atom if this decl had one
- const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- if (self.got_entries_table.get(got_target)) |got_index| {
- self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
- self.got_entries.items[got_index] = .{
- .target = .{ .sym_index = 0, .file = null },
- .sym_index = 0,
- };
- _ = self.got_entries_table.remove(got_target);
-
- log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
- }
-
- self.locals.items[sym_index].section_number = .UNDEFINED;
- _ = self.atom_by_index_table.remove(sym_index);
- log.debug(" adding local symbol index {d} to free list", .{sym_index});
- decl.link.coff.sym_index = 0;
+ kv.value.exports.deinit(self.base.allocator);
}
}
@@ -1262,9 +1312,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
- if (atom.sym_index == 0) return;
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
@@ -1299,9 +1350,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.coff.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.coff.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -1324,16 +1375,15 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *Coff, exp: Export) void {
+pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
-
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@@ -1343,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL,
.number_of_aux_symbols = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -1353,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null,
};
}
+
+ sym_index.* = 0;
}
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
@@ -1417,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global);
- const import_atom = try self.createImportAtom();
- self.imports.items[import_index].sym_index = import_atom.sym_index;
- try self.writePtrWidthAtom(import_atom);
+ const import_atom_index = try self.createImportAtom();
+ const import_atom = self.getAtom(import_atom_index);
+ self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
+ try self.writePtrWidthAtom(import_atom_index);
}
if (build_options.enable_logging) {
@@ -1453,20 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
-pub fn getDeclVAddr(
- self: *Coff,
- decl_index: Module.Decl.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
+pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
- assert(decl.link.coff.sym_index != 0);
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null };
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = @intCast(u32, reloc_info.offset),
@@ -1474,7 +1521,7 @@ pub fn getDeclVAddr(
.pcrel = false,
.length = 3,
});
- try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
+ try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -1501,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index;
}
-pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
log.debug("TODO implement updateDeclLineNumber", .{});
}
@@ -1525,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
+ const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*;
for (offsets.items) |offset| {
@@ -1609,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?);
- const last_atom = section.last_atom orelse return;
+ const last_atom_index = section.last_atom_index orelse return;
+ const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@@ -2047,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns the index of the atom referenced by the symbol described by the `sym_loc` descriptor, if any.
/// Returns null otherwise.
-pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index);
}
/// Returns the index of the GOT atom that references `sym_loc`, if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index];
- return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}
/// Returns the index of the import atom that references `sym_loc`, if one exists.
/// Returns null otherwise.
-pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index];
- return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index b1bb292c62..80c04a8fa1 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -27,42 +27,44 @@ alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
-prev: ?*Atom,
-next: ?*Atom,
-
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
-};
+prev_index: ?Index,
+next_index: ?Index,
+
+pub const Index = u32;
+
+pub fn getSymbolIndex(self: Atom) ?u32 {
+ if (self.sym_index == 0) return null;
+ return self.sym_index;
+}
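
`getSymbolIndex` encodes "no symbol yet" as index 0 rather than widening the field to an optional; `freeAtom` writes 0 back for the same reason. The sentinel convention as a sketch:

const std = @import("std");

const Atom = struct {
    /// 0 is reserved as the "unassigned" sentinel, so a plain u32
    /// doubles as an optional without the extra tag byte.
    sym_index: u32 = 0,

    fn getSymbolIndex(self: Atom) ?u32 {
        if (self.sym_index == 0) return null;
        return self.sym_index;
    }
};

test "zero means unassigned" {
    try std.testing.expect((Atom{}).getSymbolIndex() == null);
    try std.testing.expectEqual(@as(?u32, 5), (Atom{ .sym_index = 5 }).getSymbolIndex());
}
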
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
+ const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbol(.{
- .sym_index = self.sym_index,
+ .sym_index = sym_index,
.file = self.file,
});
}
/// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol {
+ const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolPtr(.{
- .sym_index = self.sym_index,
+ .sym_index = sym_index,
.file = self.file,
});
}
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
- return .{ .sym_index = self.sym_index, .file = self.file };
+ const sym_index = self.getSymbolIndex().?;
+ return .{ .sym_index = sym_index, .file = self.file };
}
/// Returns the name of this atom.
pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
+ const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolName(.{
- .sym_index = self.sym_index,
+ .sym_index = sym_index,
.file = self.file,
});
}
@@ -70,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@@ -82,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@@ -92,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}
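
`freeListEligible` in standalone form: capacity is the gap to the next atom's symbol value, and only the surplus beyond the atom's ideal padded size counts toward a free-list node. A hedged sketch; the `padToIdeal` formula and `min_text_capacity` value here are illustrative, not the linker's exact numbers:

const std = @import("std");

/// Illustrative stand-in; the real constant lives in Coff.zig.
const min_text_capacity = 64;

/// Headroom rule: keep slack proportional to the current size.
/// (Sketch only; the exact padToIdeal formula in the linker may differ.)
fn padToIdeal(actual_size: u32) u32 {
    return actual_size + actual_size / 3;
}

fn freeListEligible(self_value: u32, next_value: u32, self_size: u32) bool {
    const cap = next_value - self_value; // room until the next atom starts
    const ideal_cap = padToIdeal(self_size);
    if (cap <= ideal_cap) return false;
    const surplus = cap - ideal_cap;
    return surplus >= min_text_capacity;
}

test "only large gaps are worth a free-list node" {
    try std.testing.expect(!freeListEligible(0x1000, 0x1040, 0x30)); // tight fit
    try std.testing.expect(freeListEligible(0x1000, 0x1100, 0x30)); // big surplus
}
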
-pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void {
+pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
- const gop = try coff_file.relocs.getOrPut(gpa, self);
+ const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}
-pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
+pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator;
- log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
- const gop = try coff_file.base_relocs.getOrPut(gpa, self);
+ log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
+ offset,
+ coff_file.getAtom(atom_index).getSymbolIndex().?,
+ });
+ const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
+
+pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
+ const gpa = coff_file.base.allocator;
+ var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
+ if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
+}
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 12a34b332d..1ba1d7a1c1 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,
/// Returns the index of the Atom that is the target node of this relocation edge (if any).
-pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) {
.got,
.got_page,
.got_pageoff,
- => return coff_file.getGotAtomForSymbol(self.target),
+ => return coff_file.getGotAtomIndexForSymbol(self.target),
.direct,
.page,
.pageoff,
- => return coff_file.getAtomForSymbol(self.target),
+ => return coff_file.getAtomIndexForSymbol(self.target),
.import,
.import_page,
.import_pageoff,
- => return coff_file.getImportAtomForSymbol(self.target),
+ => return coff_file.getImportAtomIndexForSymbol(self.target),
}
}
-pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
+pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
+ const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
- const target_atom = self.getTargetAtom(coff_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
+ const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;
@@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};
-fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}
-fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,
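
`resolve` computes the source and target virtual addresses first and then hands off to an architecture-specific writer. Its overall shape, sketched with illustrative values (the `image_base` default here is an assumption, not from this commit):

const std = @import("std");

const Context = struct {
    source_vaddr: u32,
    target_vaddr: u32,
    image_base: u64,
};

/// The shape of `resolve`: both addresses are computed up front, then an
/// architecture-specific routine patches the instruction bytes using them.
fn resolveSketch(
    offset: u32,
    addend: u32,
    source_sym_value: u32,
    target_sym_value: u32,
    arch: std.Target.Cpu.Arch,
) Context {
    const ctx = Context{
        .source_vaddr = source_sym_value + offset,
        .target_vaddr = target_sym_value + addend,
        .image_base = 0x140000000, // assumed 64-bit PE default, illustrative only
    };
    switch (arch) {
        .aarch64 => {}, // resolveAarch64(ctx, ...) in the real code
        .x86_64 => {}, // resolveX86(ctx, ...) in the real code
        else => {},
    }
    return ctx;
}

test "addresses are computed before dispatch" {
    const ctx = resolveSketch(4, 8, 0x1000, 0x2000, .x86_64);
    try std.testing.expect(ctx.source_vaddr == 0x1004 and ctx.target_vaddr == 0x2008);
}
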
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 1b65bbb04b..a3d0aa8a53 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -18,31 +18,36 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Module = @import("../Module.zig");
-const Value = @import("../value.zig").Value;
+const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
allocator: Allocator,
bin_file: *File,
ptr_width: PtrWidth,
target: std.Target,
-/// A list of `File.LinkFn` whose Line Number Programs have surplus capacity.
-/// This is the same concept as `text_block_free_list`; see those doc comments.
-dbg_line_fn_free_list: std.AutoHashMapUnmanaged(*SrcFn, void) = .{},
-dbg_line_fn_first: ?*SrcFn = null,
-dbg_line_fn_last: ?*SrcFn = null,
+/// A list of `Atom`s whose Line Number Programs have surplus capacity.
+/// This is the same concept as `Section.free_list` in Elf; see those doc comments.
+src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+src_fn_first_index: ?Atom.Index = null,
+src_fn_last_index: ?Atom.Index = null,
+src_fns: std.ArrayListUnmanaged(Atom) = .{},
+src_fn_decls: AtomTable = .{},
/// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
-atom_free_list: std.AutoHashMapUnmanaged(*Atom, void) = .{},
-atom_first: ?*Atom = null,
-atom_last: ?*Atom = null,
+di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+di_atom_first_index: ?Atom.Index = null,
+di_atom_last_index: ?Atom.Index = null,
+di_atoms: std.ArrayListUnmanaged(Atom) = .{},
+di_atom_decls: AtomTable = .{},
abbrev_table_offset: ?u64 = null,
/// TODO replace with InternPool
/// Table of debug symbol names.
-strtab: std.ArrayListUnmanaged(u8) = .{},
+strtab: StringTable(.strtab) = .{},
/// Quick lookup array of all defined source files referenced by at least one Decl.
/// They will end up in the DWARF debug_line header as two lists:
@@ -50,22 +55,23 @@ strtab: std.ArrayListUnmanaged(u8) = .{},
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
-/// List of atoms that are owned directly by the DWARF module.
-/// TODO convert links in DebugInfoAtom into indices and make
-/// sure every atom is owned by this module.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
-
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
-pub const Atom = struct {
- /// Previous/next linked list pointers.
- /// This is the linked list node for this Decl's corresponding .debug_info tag.
- prev: ?*Atom,
- next: ?*Atom,
- /// Offset into .debug_info pointing to the tag for this Decl.
+const AtomTable = std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index);
+
+const Atom = struct {
+ /// Offset into .debug_info pointing to the tag for this Decl, or
+ /// offset from the beginning of the Debug Line Program header that contains this function.
off: u32,
- /// Size of the .debug_info tag for this Decl, not including padding.
+ /// Size of the .debug_info tag for this Decl, not including padding, or
+ /// size of the line number program component belonging to this function, not
+ /// including padding.
len: u32,
+
+ prev_index: ?Index,
+ next_index: ?Index,
+
+ pub const Index = u32;
};
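
The removed `SrcFn` type (see below) and the .debug_info atom are now one `Atom` struct stored in two parallel pools, addressed through per-kind decl tables; call sites therefore pass a `.src_fn` or `.di_atom` tag to `getAtom`/`getAtomPtr`. A sketch of that kind dispatch; the `Kind` enum name is illustrative:

const std = @import("std");

const Atom = struct {
    off: u32,
    len: u32,
    prev_index: ?Index,
    next_index: ?Index,

    const Index = u32;
};

const Kind = enum { src_fn, di_atom };

const Dwarf = struct {
    src_fns: std.ArrayListUnmanaged(Atom) = .{},
    di_atoms: std.ArrayListUnmanaged(Atom) = .{},

    /// One accessor serves both pools; the comptime-known tag picks
    /// which ArrayList backs the index.
    fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
        return switch (kind) {
            .src_fn => &self.src_fns.items[index],
            .di_atom => &self.di_atoms.items[index],
        };
    }
};

This is why `commitDeclState` below can use the same accessor for both `self.getAtomPtr(.src_fn, src_fn_index)` and `self.getAtom(.di_atom, ...)`.
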
/// Represents state of the analysed Decl.
@@ -75,6 +81,7 @@ pub const Atom = struct {
pub const DeclState = struct {
gpa: Allocator,
mod: *Module,
+ di_atom_decls: *const AtomTable,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
@@ -88,10 +95,11 @@ pub const DeclState = struct {
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
- fn init(gpa: Allocator, mod: *Module) DeclState {
+ fn init(gpa: Allocator, mod: *Module, di_atom_decls: *const AtomTable) DeclState {
return .{
.gpa = gpa,
.mod = mod,
+ .di_atom_decls = di_atom_decls,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
@@ -119,11 +127,11 @@ pub const DeclState = struct {
/// Adds local type relocation of the form: @offset => @this + addend
/// @this signifies the offset within the .debug_abbrev section of the containing atom.
- fn addTypeRelocLocal(self: *DeclState, atom: *const Atom, offset: u32, addend: u32) !void {
+ fn addTypeRelocLocal(self: *DeclState, atom_index: Atom.Index, offset: u32, addend: u32) !void {
log.debug("{x}: @this + {x}", .{ offset, addend });
try self.abbrev_relocs.append(self.gpa, .{
.target = null,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = addend,
});
@@ -132,13 +140,13 @@ pub const DeclState = struct {
/// Adds global type relocation of the form: @offset => @symbol + 0
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
- fn addTypeRelocGlobal(self: *DeclState, atom: *const Atom, ty: Type, offset: u32) !void {
+ fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
- .atom = atom,
+ .atom_index = atom_index,
.type = ty,
.offset = undefined,
});
@@ -153,7 +161,7 @@ pub const DeclState = struct {
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = 0,
});
@@ -162,7 +170,7 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
module: *Module,
- atom: *Atom,
+ atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
@@ -227,7 +235,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.bool, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -239,7 +247,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const offset = abi_size - payload_ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -270,7 +278,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const ptr_ty = ty.slicePtrFieldType(buf);
- try self.addTypeRelocGlobal(atom, ptr_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -282,7 +290,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -294,7 +302,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
}
},
.Array => {
@@ -305,13 +313,13 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel();
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -339,7 +347,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -371,7 +379,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -454,7 +462,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const inner_union_index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(inner_union_index + 4);
- try self.addTypeRelocLocal(atom, @intCast(u32, inner_union_index), 5);
+ try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
}
@@ -481,7 +489,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.append(0);
}
@@ -498,7 +506,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, union_obj.tag_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
@@ -541,7 +549,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
@@ -554,7 +562,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
@@ -587,12 +595,11 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
switch (loc) {
@@ -637,7 +644,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -645,13 +652,12 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
is_ptr: bool,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
const target = self.mod.getTarget();
@@ -781,7 +787,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -814,7 +820,7 @@ pub const DeclState = struct {
};
pub const AbbrevEntry = struct {
- atom: *const Atom,
+ atom_index: Atom.Index,
type: Type,
offset: u32,
};
@@ -823,7 +829,7 @@ pub const AbbrevRelocation = struct {
/// If target is null, we deal with a local relocation that is based on simple offset + addend
/// only.
target: ?u32,
- atom: *const Atom,
+ atom_index: Atom.Index,
offset: u32,
addend: u32,
};
@@ -840,26 +846,6 @@ pub const ExprlocRelocation = struct {
offset: u32,
};
-pub const SrcFn = struct {
- /// Offset from the beginning of the Debug Line Program header that contains this function.
- off: u32,
- /// Size of the line number program component belonging to this function, not
- /// including padding.
- len: u32,
-
- /// Points to the previous and next neighbors, based on the offset from .debug_line.
- /// This can be used to find, for example, the capacity of this `SrcFn`.
- prev: ?*SrcFn,
- next: ?*SrcFn,
-
- pub const empty: SrcFn = .{
- .off = 0,
- .len = 0,
- .prev = null,
- .next = null,
- };
-};
-
pub const PtrWidth = enum { p32, p64 };
pub const AbbrevKind = enum(u8) {
@@ -909,16 +895,18 @@ pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
pub fn deinit(self: *Dwarf) void {
const gpa = self.allocator;
- self.dbg_line_fn_free_list.deinit(gpa);
- self.atom_free_list.deinit(gpa);
+
+ self.src_fn_free_list.deinit(gpa);
+ self.src_fns.deinit(gpa);
+ self.src_fn_decls.deinit(gpa);
+
+ self.di_atom_free_list.deinit(gpa);
+ self.di_atoms.deinit(gpa);
+ self.di_atom_decls.deinit(gpa);
+
self.strtab.deinit(gpa);
self.di_files.deinit(gpa);
self.global_abbrev_relocs.deinit(gpa);
-
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
}
/// Initializes Decl's state and its matching output buffers.
@@ -934,15 +922,19 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
const gpa = self.allocator;
- var decl_state = DeclState.init(gpa, mod);
+ var decl_state = DeclState.init(gpa, mod, &self.di_atom_decls);
errdefer decl_state.deinit();
const dbg_line_buffer = &decl_state.dbg_line;
const dbg_info_buffer = &decl_state.dbg_info;
+ const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
+
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
.Fn => {
+ _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
@@ -1002,8 +994,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- const atom = getDbgInfoAtom(self.bin_file.tag, mod, decl_index);
- try decl_state.addTypeRelocGlobal(atom, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+ try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1075,31 +1066,28 @@ pub fn commitDeclState(
// This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
- const src_fn = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable, // TODO
- };
+ const src_fn_index = self.src_fn_decls.get(decl_index).?;
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
- if (self.dbg_line_fn_last) |last| blk: {
- if (src_fn == last) break :blk;
- if (src_fn.next) |next| {
+ if (self.src_fn_last_index) |last_index| blk: {
+ if (src_fn_index == last_index) break :blk;
+ if (src_fn.next_index) |next_index| {
+ const next = self.getAtomPtr(.src_fn, next_index);
// Update existing function - non-last item.
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (src_fn.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = src_fn.next;
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.src_fn, prev_index).next_index = src_fn.next_index;
}
- next.prev = src_fn.prev;
- src_fn.next = null;
+ next.prev_index = src_fn.prev_index;
+ src_fn.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = &elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
},
@@ -1111,39 +1099,48 @@ pub fn commitDeclState(
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
- } else if (src_fn.prev == null) {
+ } else if (src_fn.prev_index == null) {
// Append new function.
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first function of the Line Number Program.
- self.dbg_line_fn_first = src_fn;
- self.dbg_line_fn_last = src_fn;
+ self.src_fn_first_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
- const last_src_fn = self.dbg_line_fn_last.?;
+ const last_src_fn_index = self.src_fn_last_index.?;
+ const last_src_fn = self.getAtom(.src_fn, last_src_fn_index);
const needed_size = last_src_fn.off + last_src_fn.len;
- const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
+ const prev_padding_size: u32 = if (src_fn.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.src_fn, prev_index);
+ break :blk src_fn.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (src_fn.next_index) |next_index| blk: {
+ const next = self.getAtom(.src_fn, next_index);
+ break :blk next.off - (src_fn.off + src_fn.len);
+ } else 0;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
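
The move-to-end logic above (unlink the grown atom, NOP-fill its old range, relink it after `last`) is the same index-based linked-list surgery in both the .debug_line and .debug_info paths. Reduced to a sketch over a plain array:

const std = @import("std");

const Node = struct {
    off: u32 = 0,
    len: u32 = 0,
    prev: ?u32 = null,
    next: ?u32 = null,
};

/// Unlink node `i` and re-append it after `last.*`, giving it fresh space
/// at the end. The vacated byte range is NOP-filled in the real sections.
/// Assumes `i` is not already the last node.
fn moveToEnd(nodes: []Node, i: u32, last: *u32) void {
    std.debug.assert(i != last.*);
    const node = &nodes[i];
    if (node.prev) |p| nodes[p].next = node.next;
    if (node.next) |n| nodes[n].prev = node.prev;
    node.next = null;
    node.prev = last.*;
    nodes[last.*].next = i;
    node.off = nodes[last.*].off + nodes[last.*].len; // plus ideal padding in the real code
    last.* = i;
}
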
@@ -1152,7 +1149,7 @@ pub fn commitDeclState(
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_line_sect = elf_file.sections.items[shdr_index];
+ const debug_line_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
@@ -1180,7 +1177,7 @@ pub fn commitDeclState(
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const atom = wasm_file.debug_line_atom.?;
+ const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?);
const debug_line = &atom.code;
const segment_size = debug_line.items.len;
if (needed_size != segment_size) {
@@ -1212,7 +1209,7 @@ pub fn commitDeclState(
if (dbg_info_buffer.items.len == 0)
return;
- const atom = getDbgInfoAtom(self.bin_file.tag, module, decl_index);
+ const di_atom_index = self.di_atom_decls.get(decl_index).?;
if (decl_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
@@ -1234,12 +1231,12 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(module, atom, ty);
+ try decl_state.addDbgInfoType(module, di_atom_index, ty);
}
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1260,11 +1257,12 @@ pub fn commitDeclState(
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
- .atom = reloc.atom,
+ .atom_index = reloc.atom_index,
.addend = reloc.addend,
});
} else {
- const value = symbol.atom.off + symbol.offset + reloc.addend;
+ const atom = self.getAtom(.di_atom, symbol.atom_index);
+ const value = atom.off + symbol.offset + reloc.addend;
log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
@@ -1274,10 +1272,11 @@ pub fn commitDeclState(
);
}
} else {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- reloc.atom.off + reloc.offset + reloc.addend,
+ atom.off + reloc.offset + reloc.addend,
target_endian,
);
}
@@ -1293,7 +1292,7 @@ pub fn commitDeclState(
.got_load => .got_load,
},
.target = reloc.target,
- .offset = reloc.offset + atom.off,
+ .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off,
.addend = 0,
.prev_vaddr = 0,
});
@@ -1303,10 +1302,10 @@ pub fn commitDeclState(
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
}
-fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
+fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1315,24 +1314,26 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
// probably need to edit that logic too.
const gpa = self.allocator;
+ const atom = self.getAtomPtr(.di_atom, atom_index);
atom.len = len;
- if (self.atom_last) |last| blk: {
- if (atom == last) break :blk;
- if (atom.next) |next| {
+ if (self.di_atom_last_index) |last_index| blk: {
+ if (atom_index == last_index) break :blk;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(.di_atom, next_index);
// Update existing Decl - non-last item.
if (atom.off + atom.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (atom.prev) |prev| {
- self.atom_free_list.put(gpa, prev, {}) catch {};
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ self.di_atom_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.di_atom, prev_index).next_index = atom.next_index;
}
- next.prev = atom.prev;
- atom.next = null;
+ next.prev_index = atom.prev_index;
+ atom.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
},
@@ -1344,37 +1345,40 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info_index = wasm_file.debug_info_atom.?;
+ const debug_info = &wasm_file.getAtomPtr(debug_info_index).code;
try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
- } else if (atom.prev == null) {
+ } else if (atom.prev_index == null) {
// Append new Decl.
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else {
        // This is the first Decl of the .debug_info section.
- self.atom_first = atom;
- self.atom_last = atom;
+ self.di_atom_first_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
}
}
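Worth spelling out about the pattern above: atoms now live in a flat array and refer to their neighbors by Atom.Index rather than by pointer, so the links survive reallocation of the backing storage, while pointers from getAtomPtr stay valid only until the next append. A minimal self-contained sketch of the idea (names are illustrative, not the actual Dwarf API):

const Atom = struct {
    off: u32 = 0,
    len: u32 = 0,
    prev_index: ?Index = null,
    next_index: ?Index = null,

    pub const Index = u32;
};

// Link `atom_index` at the tail of the list. Only indices are stored, so the
// list stays valid even after `atoms` reallocates its backing memory.
fn appendAtom(atoms: []Atom, last_index: *?Atom.Index, atom_index: Atom.Index) void {
    if (last_index.*) |last| {
        atoms[last].next_index = atom_index;
        atoms[atom_index].prev_index = last;
    }
    last_index.* = atom_index;
}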
-fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void {
+fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1383,14 +1387,22 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
// probably need to edit that logic too.
const gpa = self.allocator;
- const last_decl = self.atom_last.?;
+ const atom = self.getAtom(.di_atom, atom_index);
+ const last_decl_index = self.di_atom_last_index.?;
+ const last_decl = self.getAtom(.di_atom, last_decl_index);
// +1 for a trailing zero to end the children of the decl tag.
const needed_size = last_decl.off + last_decl.len + 1;
- const prev_padding_size: u32 = if (atom.prev) |prev| atom.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (atom.next) |next| next.off - (atom.off + atom.len) else 0;
+ const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.di_atom, prev_index);
+ break :blk atom.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (atom.next_index) |next_index| blk: {
+ const next = self.getAtom(.di_atom, next_index);
+ break :blk next.off - (atom.off + atom.len);
+ } else 0;
// To end the children of the decl tag.
- const trailing_zero = atom.next == null;
+ const trailing_zero = atom.next_index == null;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
@@ -1399,7 +1411,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_info_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_info_sect = elf_file.sections.items[shdr_index];
+ const debug_info_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
@@ -1430,7 +1442,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const info_atom = wasm_file.debug_info_atom.?;
- const debug_info = &info_atom.code;
+ const debug_info = &wasm_file.getAtomPtr(info_atom).code;
const segment_size = debug_info.items.len;
if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
@@ -1458,10 +1470,15 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const atom_index = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+ const atom = self.getAtom(.src_fn, atom_index);
+ if (atom.len == 0) return;
+
+ const decl = module.declPtr(decl_index);
const func = decl.val.castTag(.function).?.data;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
@@ -1475,79 +1492,81 @@ pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const shdr = elf_file.sections.items[elf_file.debug_line_section_index.?];
- const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
+ const shdr = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
+ const file_pos = shdr.sh_offset + atom.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
- const file_pos = sect.offset + decl.fn_link.macho.off + self.getRelocDbgLineOff();
+ const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
- const atom = wasm_file.debug_line_atom.?;
- mem.copy(u8, atom.code.items[offset..], &data);
+ const offset = atom.off + self.getRelocDbgLineOff();
+ const line_atom_index = wasm_file.debug_line_atom.?;
+ mem.copy(u8, wasm_file.getAtomPtr(line_atom_index).code.items[offset..], &data);
},
else => unreachable,
}
}
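All three branches perform a point patch: only the few bytes holding the function's line number are rewritten, at a fixed relocation offset inside the atom's .debug_line fragment. A sketch of the file-backed case, assuming the caller has already encoded the new line number into `bytes`:

const std = @import("std");

// Patch a handful of bytes inside an already-written atom without
// rewriting the whole .debug_line fragment.
fn patchAtomBytes(
    file: std.fs.File,
    section_offset: u64, // e.g. sh_offset of .debug_line
    atom_off: u32, // the atom's offset within the section
    reloc_off: u32, // e.g. what getRelocDbgLineOff() returns
    bytes: []const u8,
) !void {
    try file.pwriteAll(bytes, section_offset + atom_off + reloc_off);
}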
-pub fn freeAtom(self: *Dwarf, atom: *Atom) void {
- if (self.atom_first == atom) {
- self.atom_first = atom.next;
- }
- if (self.atom_last == atom) {
- // TODO shrink the .debug_info section size here
- self.atom_last = atom.prev;
- }
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
+pub fn freeDecl(self: *Dwarf, decl_index: Module.Decl.Index) void {
+ const gpa = self.allocator;
- // TODO the free list logic like we do for text blocks above
- } else {
- atom.prev = null;
+ // Free SrcFn atom
+ if (self.src_fn_decls.fetchRemove(decl_index)) |kv| {
+ const src_fn_index = kv.value;
+ const src_fn = self.getAtom(.src_fn, src_fn_index);
+ _ = self.src_fn_free_list.remove(src_fn_index);
+
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ const prev = self.getAtomPtr(.src_fn, prev_index);
+ prev.next_index = src_fn.next_index;
+ if (src_fn.next_index) |next_index| {
+ self.getAtomPtr(.src_fn, next_index).prev_index = prev_index;
+ } else {
+ self.src_fn_last_index = prev_index;
+ }
+ } else if (src_fn.next_index) |next_index| {
+ self.src_fn_first_index = next_index;
+ self.getAtomPtr(.src_fn, next_index).prev_index = null;
+ }
+ if (self.src_fn_first_index == src_fn_index) {
+ self.src_fn_first_index = src_fn.next_index;
+ }
+ if (self.src_fn_last_index == src_fn_index) {
+ self.src_fn_last_index = src_fn.prev_index;
+ }
}
- if (atom.next) |next| {
- next.prev = atom.prev;
- } else {
- atom.next = null;
- }
-}
+ // Free DI atom
+ if (self.di_atom_decls.fetchRemove(decl_index)) |kv| {
+ const di_atom_index = kv.value;
+ const di_atom = self.getAtomPtr(.di_atom, di_atom_index);
-pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
- // TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
- // is desired for both.
- const gpa = self.allocator;
- const fn_link = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable,
- };
- _ = self.dbg_line_fn_free_list.remove(fn_link);
+ if (self.di_atom_first_index == di_atom_index) {
+ self.di_atom_first_index = di_atom.next_index;
+ }
+ if (self.di_atom_last_index == di_atom_index) {
+ // TODO shrink the .debug_info section size here
+ self.di_atom_last_index = di_atom.prev_index;
+ }
- if (fn_link.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = fn_link.next;
- if (fn_link.next) |next| {
- next.prev = prev;
+ if (di_atom.prev_index) |prev_index| {
+ self.getAtomPtr(.di_atom, prev_index).next_index = di_atom.next_index;
+ // TODO the free list logic like we do for SrcFn above
} else {
- self.dbg_line_fn_last = prev;
+ di_atom.prev_index = null;
+ }
+
+ if (di_atom.next_index) |next_index| {
+ self.getAtomPtr(.di_atom, next_index).prev_index = di_atom.prev_index;
+ } else {
+ di_atom.next_index = null;
}
- } else if (fn_link.next) |next| {
- self.dbg_line_fn_first = next;
- next.prev = null;
- }
- if (self.dbg_line_fn_first == fn_link) {
- self.dbg_line_fn_first = fn_link.next;
- }
- if (self.dbg_line_fn_last == fn_link) {
- self.dbg_line_fn_last = fn_link.prev;
}
}
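Both halves of freeDecl unlink with the same shape; condensed into one self-contained sketch (the free-list bookkeeping that records the surviving neighbor's surplus capacity is omitted):

const Atom = struct {
    prev_index: ?u32 = null,
    next_index: ?u32 = null,
};

// Remove `index` from the doubly linked list, fixing up head/tail tracking.
fn unlink(atoms: []Atom, first: *?u32, last: *?u32, index: u32) void {
    const atom = atoms[index];
    if (atom.prev_index) |prev| {
        atoms[prev].next_index = atom.next_index;
    } else {
        // `index` was the head of the list.
        first.* = atom.next_index;
    }
    if (atom.next_index) |next| {
        atoms[next].prev_index = atom.prev_index;
    } else {
        // `index` was the tail of the list.
        last.* = atom.prev_index;
    }
}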
@@ -1690,7 +1709,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_abbrev_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
- const debug_abbrev_sect = elf_file.sections.items[shdr_index];
+ const debug_abbrev_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
},
@@ -1704,7 +1723,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
+ const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
},
@@ -1770,11 +1789,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeString(module.root_pkg.root_src_path);
+ const name_strp = try self.strtab.insert(self.allocator, module.root_pkg.root_src_path);
var compile_unit_dir_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const compile_unit_dir = resolveCompilationDir(module, &compile_unit_dir_buffer);
- const comp_dir_strp = try self.makeString(compile_unit_dir);
- const producer_strp = try self.makeString(link.producer_string);
+ const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
+ const producer_strp = try self.strtab.insert(self.allocator, link.producer_string);
di_buf.appendAssumeCapacity(@enumToInt(AbbrevKind.compile_unit));
if (self.bin_file.tag == .macho) {
@@ -1805,7 +1824,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
},
@@ -1817,7 +1836,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
else => unreachable,
@@ -2124,7 +2143,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_aranges_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
- const debug_aranges_sect = elf_file.sections.items[shdr_index];
+ const debug_aranges_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_aranges_sect.sh_offset;
try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
},
@@ -2138,7 +2157,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
+ const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
},
@@ -2275,19 +2294,23 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const needed_with_padding = padToIdeal(needed_bytes);
const delta = needed_with_padding - dbg_line_prg_off;
- var src_fn = self.dbg_line_fn_first.?;
- const last_fn = self.dbg_line_fn_last.?;
+ const first_fn_index = self.src_fn_first_index.?;
+ const first_fn = self.getAtom(.src_fn, first_fn_index);
+ const last_fn_index = self.src_fn_last_index.?;
+ const last_fn = self.getAtom(.src_fn, last_fn_index);
+
+ var src_fn_index = first_fn_index;
- var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - src_fn.off);
+ var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off);
defer gpa.free(buffer);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
- const needed_size = elf_file.sections.items[shdr_index].sh_size + delta;
+ const needed_size = elf_file.sections.items(.shdr)[shdr_index].sh_size + delta;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const file_pos = elf_file.sections.items[shdr_index].sh_offset + src_fn.off;
+ const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + first_fn.off;
const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2299,7 +2322,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const sect_index = d_sym.debug_line_section_index.?;
const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
try d_sym.growSection(sect_index, needed_size, true);
- const file_pos = d_sym.getSection(sect_index).offset + src_fn.off;
+ const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
const amt = try d_sym.file.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2308,19 +2331,20 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = &wasm_file.debug_line_atom.?.code;
- mem.copy(u8, buffer, debug_line.items[src_fn.off..]);
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
+ mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
try debug_line.resize(self.allocator, debug_line.items.len + delta);
- mem.copy(u8, debug_line.items[src_fn.off + delta ..], buffer);
+ mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
},
else => unreachable,
}
while (true) {
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.off += delta;
- if (src_fn.next) |next| {
- src_fn = next;
+ if (src_fn.next_index) |next_index| {
+ src_fn_index = next_index;
} else break;
}
}
@@ -2346,7 +2370,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
},
@@ -2358,7 +2382,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
},
else => unreachable,
@@ -2366,22 +2390,26 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
}
fn getDebugInfoOff(self: Dwarf) ?u32 {
- const first = self.atom_first orelse return null;
+ const first_index = self.di_atom_first_index orelse return null;
+ const first = self.getAtom(.di_atom, first_index);
return first.off;
}
fn getDebugInfoEnd(self: Dwarf) ?u32 {
- const last = self.atom_last orelse return null;
+ const last_index = self.di_atom_last_index orelse return null;
+ const last = self.getAtom(.di_atom, last_index);
return last.off + last.len;
}
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
- const first = self.dbg_line_fn_first orelse return null;
+ const first_index = self.src_fn_first_index orelse return null;
+ const first = self.getAtom(.src_fn, first_index);
return first.off;
}
fn getDebugLineProgramEnd(self: Dwarf) ?u32 {
- const last = self.dbg_line_fn_last orelse return null;
+ const last_index = self.src_fn_last_index orelse return null;
+ const last = self.getAtom(.src_fn, last_index);
return last.off + last.len;
}
@@ -2435,15 +2463,6 @@ fn getRelocDbgInfoSubprogramHighPC(self: Dwarf) u32 {
return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Dwarf, bytes: []const u8) !u32 {
- try self.strtab.ensureUnusedCapacity(self.allocator, bytes.len + 1);
- const result = self.strtab.items.len;
- self.strtab.appendSliceAssumeCapacity(bytes);
- self.strtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
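Two properties of padToIdeal are load-bearing here: the saturating add `+|` cannot overflow, and with an ideal_factor of 3 (the value the ELF linker declares below; assumed to match in this file) the padding works out as in this test, which could sit next to the function above (std is assumed in scope):

test "padToIdeal example" {
    // 120 + 120 / 3 == 160, the "ideal capacity" of a 120-byte atom.
    try std.testing.expectEqual(@as(u32, 160), padToIdeal(@as(u32, 120)));
    // `+|` saturates at the integer type's maximum instead of overflowing.
    try std.testing.expectEqual(@as(u8, 255), padToIdeal(@as(u8, 250)));
}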
@@ -2465,29 +2484,20 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
error_set.names = names;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = .{
- .prev = null,
- .next = null,
- .off = 0,
- .len = 0,
- };
-
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
- try self.managed_atoms.append(gpa, atom);
+ const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
const file_pos = blk: {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
break :blk debug_info_sect.sh_offset;
},
.macho => {
@@ -2502,22 +2512,23 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
};
var buf: [@sizeOf(u32)]u8 = undefined;
- mem.writeInt(u32, &buf, atom.off, self.target.cpu.arch.endian());
+ mem.writeInt(u32, &buf, self.getAtom(.di_atom, di_atom_index).off, self.target.cpu.arch.endian());
while (self.global_abbrev_relocs.popOrNull()) |reloc| {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- try elf_file.base.file.?.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
- try d_sym.file.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = wasm_file.debug_info_atom.?.code;
- mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
+ const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
+ mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
},
else => unreachable,
}
@@ -2635,12 +2646,62 @@ fn addDbgInfoErrorSet(
try dbg_info_buffer.append(0);
}
-fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *Atom {
- const decl = mod.declPtr(decl_index);
- return switch (tag) {
- .elf => &decl.link.elf.dbg_info_atom,
- .macho => &decl.link.macho.dbg_info_atom,
- .wasm => &decl.link.wasm.dbg_info_atom,
- else => unreachable,
+const Kind = enum { src_fn, di_atom };
+
+fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
+ const index = blk: {
+ switch (kind) {
+ .src_fn => {
+ const index = @intCast(Atom.Index, self.src_fns.items.len);
+ _ = try self.src_fns.addOne(self.allocator);
+ break :blk index;
+ },
+ .di_atom => {
+ const index = @intCast(Atom.Index, self.di_atoms.items.len);
+ _ = try self.di_atoms.addOne(self.allocator);
+ break :blk index;
+ },
+ }
+ };
+ const atom = self.getAtomPtr(kind, index);
+ atom.* = .{
+ .off = 0,
+ .len = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ return index;
+}
+
+fn getOrCreateAtomForDecl(self: *Dwarf, comptime kind: Kind, decl_index: Module.Decl.Index) !Atom.Index {
+ switch (kind) {
+ .src_fn => {
+ const gop = try self.src_fn_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ .di_atom => {
+ const gop = try self.di_atom_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ }
+}
+
+fn getAtom(self: *const Dwarf, comptime kind: Kind, index: Atom.Index) Atom {
+ return switch (kind) {
+ .src_fn => self.src_fns.items[index],
+ .di_atom => self.di_atoms.items[index],
+ };
+}
+
+fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
+ return switch (kind) {
+ .src_fn => &self.src_fns.items[index],
+ .di_atom => &self.di_atoms.items[index],
};
}
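The net result is a small handle-based API keyed by Kind. A hypothetical call site, not taken from the diff, showing the intended usage including the pointer-invalidation caveat:

fn exampleUsage(self: *Dwarf, decl_index: Module.Decl.Index) !void {
    const index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
    // getAtom returns a copy, safe to hold while the atom lists grow.
    const snapshot = self.getAtom(.di_atom, index);
    _ = snapshot.len;
    // getAtomPtr returns a pointer into self.di_atoms, which createAtom may
    // invalidate by growing the list, so re-fetch it after any append.
    self.getAtomPtr(.di_atom, index).off = 0;
}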
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 48e0320dc6..45952da6c0 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,40 +1,89 @@
const Elf = @This();
const std = @import("std");
+const build_options = @import("build_options");
const builtin = @import("builtin");
-const math = std.math;
-const mem = std.mem;
const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const fs = std.fs;
const elf = std.elf;
+const fs = std.fs;
const log = std.log.scoped(.link);
+const math = std.math;
+const mem = std.mem;
-const Module = @import("../Module.zig");
-const Compilation = @import("../Compilation.zig");
-const Dwarf = @import("Dwarf.zig");
const codegen = @import("../codegen.zig");
-const lldMain = @import("../main.zig").lldMain;
-const trace = @import("../tracy.zig").trace;
-const Package = @import("../Package.zig");
-const Value = @import("../value.zig").Value;
-const Type = @import("../type.zig").Type;
-const TypedValue = @import("../TypedValue.zig");
-const link = @import("../link.zig");
-const File = link.File;
-const build_options = @import("build_options");
-const target_util = @import("../target.zig");
const glibc = @import("../glibc.zig");
+const link = @import("../link.zig");
+const lldMain = @import("../main.zig").lldMain;
const musl = @import("../musl.zig");
-const Cache = @import("../Cache.zig");
+const target_util = @import("../target.zig");
+const trace = @import("../tracy.zig").trace;
+
const Air = @import("../Air.zig");
+const Allocator = std.mem.Allocator;
+pub const Atom = @import("Elf/Atom.zig");
+const Cache = @import("../Cache.zig");
+const Compilation = @import("../Compilation.zig");
+const Dwarf = @import("Dwarf.zig");
+const File = link.File;
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
+const Module = @import("../Module.zig");
+const Package = @import("../Package.zig");
+const StringTable = @import("strtab.zig").StringTable;
+const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
+const Value = @import("../value.zig").Value;
const default_entry_addr = 0x8000000;
pub const base_tag: File.Tag = .elf;
+const Section = struct {
+ shdr: elf.Elf64_Shdr,
+ phdr_index: u16,
+
+ /// Index of the last allocated atom in this section.
+ last_atom_index: ?Atom.Index = null,
+
+ /// A list of atoms that have surplus capacity. This list can have false
+ /// positives, as functions grow and shrink over time, only sometimes being added
+ /// or removed from the freelist.
+ ///
+ /// An atom has surplus capacity when its overcapacity value is greater than
+ /// padToIdeal(minimum_atom_size). That is, when it has so
+    /// much extra capacity that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size + (size / ideal_factor)
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+ /// allocate a fresh text block, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
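To make those definitions concrete, using the constants declared further down (ideal_factor = 3, minimum_atom_size = 64): an atom of size 64 has an ideal capacity of 64 + 64/3 = 85 bytes, which is also min_text_capacity, and since surplus is capacity minus ideal capacity, such an atom joins the free list only once the gap to its successor reaches 85 + 85 = 170 bytes (the same computation the removed TextBlock.freeListEligible performs below).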
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ shdr: u16,
+    /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, elf_file: *const Elf, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp.*))) return exp;
+ }
+ return null;
+ }
+};
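DeclMetadata replaces the old ?u16 map value with a full per-Decl record: owning atom, section index, and export aliases. A hypothetical lookup through it (illustrative only, not an actual call site in this change):

fn findExportSymIndex(elf_file: *const Elf, decl_index: Module.Decl.Index, name: []const u8) ?u32 {
    const metadata = elf_file.decls.get(decl_index) orelse return null;
    return metadata.getExport(elf_file, name);
}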
+
base: File,
dwarf: ?Dwarf = null,
@@ -45,12 +94,12 @@ llvm_object: ?*LlvmObject = null,
/// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
/// Same order as in the file.
-sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
+sections: std.MultiArrayList(Section) = .{},
shdr_table_offset: ?u64 = null,
/// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
/// Same order as in the file.
-program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
+program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16 = null,
@@ -62,12 +111,10 @@ phdr_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_load_rw_index: ?u16 = null,
-phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
-
entry_addr: ?u64 = null,
page_size: u32,
-shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
+shstrtab: StringTable(.strtab) = .{},
shstrtab_index: ?u16 = null,
symtab_section_index: ?u16 = null,
@@ -110,39 +157,14 @@ debug_line_header_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
-/// Pointer to the last allocated atom
-atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
-
-/// A list of text blocks that have surplus capacity. This list can have false
-/// positives, as functions grow and shrink over time, only sometimes being added
-/// or removed from the freelist.
-///
-/// A text block has surplus capacity when its overcapacity value is greater than
-/// padToIdeal(minimum_text_block_size). That is, when it has so
-/// much extra capacity, that we could fit a small new symbol in it, itself with
-/// ideal_capacity or more.
-///
-/// Ideal capacity is defined by size + (size / ideal_factor)
-///
-/// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
-/// overcapacity can be negative. A simple way to have negative overcapacity is to
-/// allocate a fresh text block, which will have ideal capacity, and then grow it
-/// by 1 byte. It will then have -1 overcapacity.
-atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock)) = .{},
-
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+/// Table of tracked Decls.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are owned directly by the linker.
-/// Currently these are only atoms that are the result of linking
-/// object files. Atoms which take part in incremental linking are
-/// at present owned by Module.Decl.
-/// TODO consolidate this.
-managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+
+/// Table of atoms indexed by the symbol index.
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -170,15 +192,8 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// this will be a table indexed by index into the list of Atoms.
relocs: RelocTable = .{},
-const Reloc = struct {
- target: u32,
- offset: u64,
- addend: u32,
- prev_vaddr: u64,
-};
-
-const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Reloc));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
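Both tables are now keyed by Atom.Index instead of *TextBlock. The getOrPut-then-append sequence that getDeclVAddr used to perform inline (removed further down) presumably moves behind Atom.addRelocation; a sketch under that assumption:

// Sketch of Atom.addRelocation, reconstructed from the inline code it
// replaces in getDeclVAddr; the actual helper lives in src/link/Elf/Atom.zig.
pub fn addRelocation(elf_file: *Elf, atom_index: Atom.Index, reloc: Atom.Reloc) !void {
    const gpa = elf_file.base.allocator;
    const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    try gop.value_ptr.append(gpa, reloc);
}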
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
@@ -187,67 +202,11 @@ const ideal_factor = 3;
/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
-const minimum_text_block_size = 64;
-const min_text_capacity = padToIdeal(minimum_text_block_size);
+const minimum_atom_size = 64;
+pub const min_text_capacity = padToIdeal(minimum_atom_size);
pub const PtrWidth = enum { p32, p64 };
-pub const TextBlock = struct {
- /// Each decl always gets a local symbol with the fully qualified name.
- /// The vaddr and size are found here directly.
- /// The file offset is found by computing the vaddr offset from the section vaddr
- /// the symbol references, and adding that to the file offset of the section.
- /// If this field is 0, it means the codegen size = 0 and there is no symbol or
- /// offset table entry.
- local_sym_index: u32,
- /// This field is undefined for symbols with size = 0.
- offset_table_index: u32,
- /// Points to the previous and next neighbors, based on the `text_offset`.
- /// This can be used to find, for example, the capacity of this `TextBlock`.
- prev: ?*TextBlock,
- next: ?*TextBlock,
-
- dbg_info_atom: Dwarf.Atom,
-
- pub const empty = TextBlock{
- .local_sym_index = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
- };
-
- /// Returns how much room there is to grow in virtual address space.
- /// File offset relocation happens transparently, so it is not included in
- /// this calculation.
- fn capacity(self: TextBlock, elf_file: Elf) u64 {
- const self_sym = elf_file.local_symbols.items[self.local_sym_index];
- if (self.next) |next| {
- const next_sym = elf_file.local_symbols.items[next.local_sym_index];
- return next_sym.st_value - self_sym.st_value;
- } else {
- // We are the last block. The capacity is limited only by virtual address space.
- return std.math.maxInt(u32) - self_sym.st_value;
- }
- }
-
- fn freeListEligible(self: TextBlock, elf_file: Elf) bool {
- // No need to keep a free list node for the last block.
- const next = self.next orelse return false;
- const self_sym = elf_file.local_symbols.items[self.local_sym_index];
- const next_sym = elf_file.local_symbols.items[next.local_sym_index];
- const cap = next_sym.st_value - self_sym.st_value;
- const ideal_cap = padToIdeal(self_sym.st_size);
- if (cap <= ideal_cap) return false;
- const surplus = cap - ideal_cap;
- return surplus >= min_text_capacity;
- }
-};
-
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
assert(options.target.ofmt == .elf);
@@ -279,16 +238,19 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// There must always be a null section in index 0
try self.sections.append(allocator, .{
- .sh_name = 0,
- .sh_type = elf.SHT_NULL,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 0,
- .sh_entsize = 0,
+ .shdr = .{
+ .sh_name = 0,
+ .sh_type = elf.SHT_NULL,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 0,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
try self.populateMissingMetadata();
@@ -335,74 +297,67 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
}
pub fn deinit(self: *Elf) void {
+ const gpa = self.base.allocator;
+
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
- }
-
- self.sections.deinit(self.base.allocator);
- self.program_headers.deinit(self.base.allocator);
- self.shstrtab.deinit(self.base.allocator);
- self.local_symbols.deinit(self.base.allocator);
- self.global_symbols.deinit(self.base.allocator);
- self.global_symbol_free_list.deinit(self.base.allocator);
- self.local_symbol_free_list.deinit(self.base.allocator);
- self.offset_table_free_list.deinit(self.base.allocator);
- self.offset_table.deinit(self.base.allocator);
- self.phdr_shdr_table.deinit(self.base.allocator);
- self.decls.deinit(self.base.allocator);
-
- self.atoms.deinit(self.base.allocator);
+ if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+ }
+
+ for (self.sections.items(.free_list)) |*free_list| {
+ free_list.deinit(gpa);
+ }
+ self.sections.deinit(gpa);
+
+ self.program_headers.deinit(gpa);
+ self.shstrtab.deinit(gpa);
+ self.local_symbols.deinit(gpa);
+ self.global_symbols.deinit(gpa);
+ self.global_symbol_free_list.deinit(gpa);
+ self.local_symbol_free_list.deinit(gpa);
+ self.offset_table_free_list.deinit(gpa);
+ self.offset_table.deinit(gpa);
+
{
- var it = self.atom_free_lists.valueIterator();
- while (it.next()) |free_list| {
- free_list.deinit(self.base.allocator);
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
}
- self.atom_free_lists.deinit(self.base.allocator);
+ self.decls.deinit(gpa);
}
- for (self.managed_atoms.items) |atom| {
- self.base.allocator.destroy(atom);
- }
- self.managed_atoms.deinit(self.base.allocator);
+ self.atoms.deinit(gpa);
+ self.atom_by_index_table.deinit(gpa);
{
var it = self.unnamed_const_atoms.valueIterator();
while (it.next()) |atoms| {
- atoms.deinit(self.base.allocator);
+ atoms.deinit(gpa);
}
- self.unnamed_const_atoms.deinit(self.base.allocator);
+ self.unnamed_const_atoms.deinit(gpa);
}
{
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
- relocs.deinit(self.base.allocator);
+ relocs.deinit(gpa);
}
- self.relocs.deinit(self.base.allocator);
+ self.relocs.deinit(gpa);
}
- self.atom_by_index_table.deinit(self.base.allocator);
-
if (self.dwarf) |*dw| {
dw.deinit();
}
}
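Because sections is now a std.MultiArrayList(Section), each field lives in its own column: reads become sections.items(.shdr)[i] throughout, and deinit above walks items(.free_list) itself since MultiArrayList frees only its own columns, not heap data owned by the rows. A minimal sketch of the access pattern:

const std = @import("std");

fn multiArrayListSketch(gpa: std.mem.Allocator) !void {
    var list: std.MultiArrayList(Section) = .{};
    // Frees the columns themselves; per-row heap data (like free_list here)
    // must be deinitialized separately, as Elf.deinit does above.
    defer list.deinit(gpa);
    try list.append(gpa, .{
        .shdr = std.mem.zeroes(std.elf.Elf64_Shdr),
        .phdr_index = 0,
    });
    // items(.field) returns a dense slice of just that column.
    list.items(.shdr)[0].sh_addralign = 1;
    _ = list.slice().len; // total row count, as used by populateMissingMetadata
}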
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- assert(decl.link.elf.local_sym_index != 0);
- const target = decl.link.elf.local_sym_index;
- const vaddr = self.local_symbols.items[target].st_value;
- const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
- const gop = try self.relocs.getOrPut(self.base.allocator, atom);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(self.base.allocator, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const this_atom = self.getAtom(this_atom_index);
+ const target = this_atom.getSymbolIndex().?;
+ const vaddr = this_atom.getSymbol(self).st_value;
+ const atom_index = self.getAtomIndexForSymbol(reloc_info.parent_atom_index).?;
+ try Atom.addRelocation(self, atom_index, .{
.target = target,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
@@ -423,7 +378,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.shdr_table_offset) |off| {
const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.sections.items.len * shdr_size;
+ const tight_size = self.sections.slice().len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -433,7 +388,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.phdr_table_offset) |off| {
const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
- const tight_size = self.sections.items.len * phdr_size;
+ const tight_size = self.sections.slice().len * phdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -441,7 +396,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
}
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
const increased_size = padToIdeal(section.sh_size);
const test_end = section.sh_offset + increased_size;
if (end > section.sh_offset and start < test_end) {
@@ -468,7 +423,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.phdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
}
@@ -487,31 +442,10 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
return start;
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
- const result = self.shstrtab.items.len;
- self.shstrtab.appendSliceAssumeCapacity(bytes);
- self.shstrtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
-fn getString(self: Elf, str_off: u32) []const u8 {
- assert(str_off < self.shstrtab.items.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
-}
-
-fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
- const existing_name = self.getString(old_str_off);
- if (mem.eql(u8, existing_name, new_name)) {
- return old_str_off;
- }
- return self.makeString(new_name);
-}
-
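The removed helpers were append-only (makeString's own TODO asked for a table); the new StringTable(.strtab) from strtab.zig presumably interns strings so a repeated name maps to a single offset. A sketch of that idea, not the actual StringTable implementation, of which only the insert/get/buffer API is visible in this diff:

const std = @import("std");

const InternedStrtab = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},
    table: std.StringHashMapUnmanaged(u32) = .{},

    fn insert(self: *InternedStrtab, gpa: std.mem.Allocator, bytes: []const u8) !u32 {
        if (self.table.get(bytes)) |off| return off; // already interned
        const off = @intCast(u32, self.buffer.items.len);
        try self.buffer.appendSlice(gpa, bytes);
        try self.buffer.append(gpa, 0); // NUL terminator, as in the old makeString
        // Duplicate the key so it does not dangle when `buffer` reallocates;
        // freeing the duplicated keys on deinit is omitted for brevity.
        try self.table.put(gpa, try gpa.dupe(u8, bytes), off);
        return off;
    }
};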
pub fn populateMissingMetadata(self: *Elf) !void {
assert(self.llvm_object == null);
+ const gpa = self.base.allocator;
const small_ptr = switch (self.ptr_width) {
.p32 => true,
.p64 => false,
@@ -525,7 +459,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD RE free space 0x{x} to 0x{x}", .{ off, off + file_size });
const entry_addr: u64 = self.entry_addr orelse if (self.base.options.target.cpu.arch == .spu_2) @as(u64, 0) else default_entry_addr;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -535,7 +469,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_X | elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_re_index.?, .{});
self.entry_addr = null;
self.phdr_table_dirty = true;
}
@@ -552,7 +485,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -575,7 +508,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -585,7 +518,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_ro_index.?, .{});
self.phdr_table_dirty = true;
}
@@ -599,7 +531,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -609,148 +541,145 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R | elf.PF_W,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_rw_index.?, .{});
self.phdr_table_dirty = true;
}
if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.items.len);
- assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
- const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".shstrtab"),
- .sh_type = elf.SHT_STRTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = self.shstrtab.items.len,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+ assert(self.shstrtab.buffer.items.len == 0);
+ try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
+ const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
+ log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.buffer.items.len });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".shstrtab"),
+ .sh_type = elf.SHT_STRTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = self.shstrtab.buffer.items.len,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.items.len);
+ self.text_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".text"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".text"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_re_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_re_index.?,
- self.text_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.items.len);
+ self.got_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".got"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".got"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_got_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_got_index.?,
- self.got_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.rodata_section_index == null) {
- self.rodata_section_index = @intCast(u16, self.sections.items.len);
+ self.rodata_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".rodata"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".rodata"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_ro_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_ro_index.?,
- self.rodata_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.data_section_index == null) {
- self.data_section_index = @intCast(u16, self.sections.items.len);
+ self.data_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".data"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".data"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_rw_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_rw_index.?,
- self.data_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.items.len);
+ self.symtab_section_index = @intCast(u16, self.sections.slice().len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
log.debug("found symtab free space 0x{x} to 0x{x}", .{ off, off + file_size });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".symtab"),
- .sh_type = elf.SHT_SYMTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size,
- // The section header index of the associated string table.
- .sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
- .sh_addralign = min_align,
- .sh_entsize = each_size,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".symtab"),
+ .sh_type = elf.SHT_SYMTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size,
+ // The section header index of the associated string table.
+ .sh_link = self.shstrtab_index.?,
+ .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_addralign = min_align,
+ .sh_entsize = each_size,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
try self.writeSymbol(0);
@@ -758,27 +687,30 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.dwarf) |*dw| {
if (self.debug_str_section_index == null) {
- self.debug_str_section_index = @intCast(u16, self.sections.items.len);
- assert(dw.strtab.items.len == 0);
- try dw.strtab.append(self.base.allocator, 0);
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_str"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 1,
+ self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ assert(dw.strtab.buffer.items.len == 0);
+ try dw.strtab.buffer.append(gpa, 0);
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 1,
+ },
+ .phdr_index = undefined,
});
self.debug_strtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.debug_info_section_index == null) {
- self.debug_info_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 200;
const p_align = 1;
@@ -787,24 +719,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_info"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_info_header_dirty = true;
}
if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 128;
const p_align = 1;
@@ -813,24 +748,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_abbrev"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_abbrev_section_dirty = true;
}
if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 160;
const p_align = 16;
@@ -839,24 +777,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_aranges"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_aranges_section_dirty = true;
}
if (self.debug_line_section_index == null) {
- self.debug_line_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 250;
const p_align = 1;
@@ -865,17 +806,20 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_line"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_line_header_dirty = true;
@@ -891,7 +835,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Shdr),
};
if (self.shdr_table_offset == null) {
- self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
+ self.shdr_table_offset = self.findFreeSpace(self.sections.slice().len * shsize, shalign);
self.shdr_table_dirty = true;
}
@@ -922,7 +866,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// offset + its filesize.
var max_file_offset: u64 = 0;
- for (self.sections.items) |shdr| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_offset + shdr.sh_size > max_file_offset) {
max_file_offset = shdr.sh_offset + shdr.sh_size;
}
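
Throughout these hunks, `self.sections` changes from a flat list of headers into a `std.MultiArrayList` whose element couples each `shdr` with per-section linker state (`phdr_index`, `free_list`, `last_atom_index`); that is why reads become `sections.items(.shdr)` and counts become `sections.slice().len`. A minimal sketch of the access pattern, with a stand-in element type rather than the real one:

    const std = @import("std");

    // Stand-in element; the real one couples elf.Elf64_Shdr with
    // phdr_index, free_list, and last_atom_index.
    const Section = struct {
        shdr: u64,
        phdr_index: u16,
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_state.allocator();

        var sections = std.MultiArrayList(Section){};
        defer sections.deinit(gpa);

        try sections.append(gpa, .{ .shdr = 0x1000, .phdr_index = 2 });

        // Struct-of-arrays layout: all `shdr` values are contiguous in memory.
        for (sections.items(.shdr)) |shdr| {
            std.debug.print("shdr = 0x{x}\n", .{shdr});
        }
        std.debug.print("count = {d}\n", .{sections.slice().len});
    }
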
@@ -932,24 +876,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
-fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
+fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
// TODO Also detect virtual address collisions.
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
- const sym = self.local_symbols.items[last.local_sym_index];
+ const existing_size = if (maybe_last_atom_index) |last_atom_index| blk: {
+ const last = self.getAtom(last_atom_index);
+ const sym = last.getSymbol(self);
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
break :blk shdr.sh_size;
} else 0;
shdr.sh_size = 0;
- log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
- self.getString(shdr.sh_name),
+ log.debug("new '{?s}' file offset 0x{x} to 0x{x}", .{
+ self.shstrtab.get(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
@@ -975,7 +922,7 @@ pub fn growNonAllocSection(
min_alignment: u32,
requires_file_copy: bool,
) !void {
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
@@ -988,7 +935,7 @@ pub fn growNonAllocSection(
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
- log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
+ log.debug("moving '{?s}' from 0x{x} to 0x{x}", .{ self.shstrtab.get(shdr.sh_name), shdr.sh_offset, new_offset });
if (requires_file_copy) {
const amt = try self.base.file.?.copyRangeAll(
@@ -1059,6 +1006,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
}
+ const gpa = self.base.allocator;
var sub_prog_node = prog_node.start("ELF Flush", 0);
sub_prog_node.activate();
defer sub_prog_node.end();
@@ -1077,12 +1025,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
var it = self.relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
- const source_sym = self.local_symbols.items[atom.local_sym_index];
- const source_shdr = self.sections.items[source_sym.st_shndx];
+ const atom = self.getAtom(atom_index);
+ const source_sym = atom.getSymbol(self);
+ const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
- log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
+ log.debug("relocating '{?s}'", .{self.shstrtab.get(source_sym.st_name)});
for (relocs.items) |*reloc| {
const target_sym = self.local_symbols.items[reloc.target];
@@ -1093,10 +1042,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
const file_offset = source_shdr.sh_offset + section_offset;
- log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
+ log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
reloc.offset,
target_vaddr,
- self.getString(target_sym.st_name),
+ self.shstrtab.get(target_sym.st_name),
});
switch (self.ptr_width) {
@@ -1174,8 +1123,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
@@ -1186,8 +1135,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
@@ -1203,20 +1152,20 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
const shdr_index = self.shstrtab_index.?;
- if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
- const shstrtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
+ const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
self.shstrtab_dirty = false;
}
}
if (self.dwarf) |dwarf| {
const shdr_index = self.debug_str_section_index.?;
- if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
- const debug_strtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
+ if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
+ const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
self.debug_strtab_dirty = false;
}
}
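
The old `makeString`/`getString` pair gives way to a `StringTable` that owns its backing `buffer` and whose `get` returns an optional, which is why the log format specifiers change from `{s}` to `{?s}`. A reduced sketch of such a table; the real implementation likely also deduplicates strings, and these details are assumptions, not taken from this diff:

    const std = @import("std");
    const mem = std.mem;

    // insert() appends a NUL-terminated copy and returns its byte offset;
    // get() returns null for an out-of-range offset.
    const StringTable = struct {
        buffer: std.ArrayListUnmanaged(u8) = .{},

        fn insert(self: *StringTable, gpa: mem.Allocator, string: []const u8) !u32 {
            const off = @intCast(u32, self.buffer.items.len);
            try self.buffer.ensureUnusedCapacity(gpa, string.len + 1);
            self.buffer.appendSliceAssumeCapacity(string);
            self.buffer.appendAssumeCapacity(0);
            return off;
        }

        fn get(self: StringTable, off: u32) ?[]const u8 {
            if (off >= self.buffer.items.len) return null;
            return mem.sliceTo(self.buffer.items[off..], 0);
        }
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_state.allocator();
        var shstrtab = StringTable{};
        defer shstrtab.buffer.deinit(gpa);

        const off = try shstrtab.insert(gpa, ".debug_info");
        // `{?s}` formats an optional slice, printing "null" when absent.
        std.debug.print("{?s}\n", .{shstrtab.get(off)});
    }
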
@@ -1231,7 +1180,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
.p64 => @alignOf(elf.Elf64_Shdr),
};
const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.items.len * shsize;
+ const needed_size = self.sections.slice().len * shsize;
if (needed_size > allocated_size) {
self.shdr_table_offset = null; // free the space
@@ -1240,12 +1189,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = sectHeaderTo32(self.sections.items[i]);
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -1253,12 +1203,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = self.sections.items[i];
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = slice.items(.shdr)[i];
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
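
Both the program-header and section-header write paths byte-swap every field when the output's endianness differs from the host's (`foreign_endian`). A small sketch of `mem.byteSwapAllFields` on a stand-in header struct:

    const std = @import("std");
    const mem = std.mem;

    // Stand-in for elf.Elf64_Shdr; byteSwapAllFields swaps every field in place.
    const Shdr = struct {
        sh_name: u32,
        sh_size: u64,
    };

    pub fn main() void {
        var shdr = Shdr{ .sh_name = 1, .sh_size = 0x10 };
        mem.byteSwapAllFields(Shdr, &shdr);
        std.debug.print("sh_name = 0x{x}\n", .{shdr.sh_name}); // 0x1000000
    }
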
@@ -2069,7 +2020,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @intCast(u16, self.sections.items.len);
+ const e_shnum = @intCast(u16, self.sections.slice().len);
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -2081,113 +2032,145 @@ fn writeElfHeader(self: *Elf) !void {
try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
}
-fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
- const local_sym = self.local_symbols.items[text_block.local_sym_index];
- const name_str_index = local_sym.st_name;
- const name = self.getString(name_str_index);
- log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
+fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
+ log.debug("freeAtom {d} ({s})", .{ atom_index, atom.getName(self) });
- const free_list = self.atom_free_lists.getPtr(phdr_index).?;
+ Atom.freeRelocations(self, atom_index);
+
+ const gpa = self.base.allocator;
+ const shndx = atom.getSymbol(self).st_shndx;
+ const free_list = &self.sections.items(.free_list)[shndx];
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == text_block) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == text_block.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- if (self.atoms.getPtr(phdr_index)) |last_block| {
- if (last_block.* == text_block) {
- if (text_block.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[shndx];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- last_block.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- _ = self.atoms.fetchRemove(phdr_index);
+ maybe_last_atom_index.* = null;
}
}
}
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- text_block.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- text_block.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
- if (self.dwarf) |*dw| {
- dw.freeAtom(&text_block.dbg_info_atom);
- }
+ // Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
+ const local_sym_index = atom.getSymbolIndex().?;
+
+ self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
+ self.local_symbols.items[local_sym_index].st_info = 0;
+ self.local_symbols.items[local_sym_index].st_shndx = 0;
+ _ = self.atom_by_index_table.remove(local_sym_index);
+ self.getAtomPtr(atom_index).local_sym_index = 0;
+
+ self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
}
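
`freeAtom` unlinks the atom from a doubly linked list that is now expressed with indices (`prev_index`/`next_index`) rather than pointers, so the links survive reallocation of the backing atoms array. A minimal sketch of the unlink step under that representation:

    const std = @import("std");

    const Node = struct {
        prev_index: ?u32,
        next_index: ?u32,
    };

    // Unlink nodes[index] by patching its neighbors' indices.
    fn unlink(nodes: []Node, index: u32) void {
        const node = nodes[index];
        if (node.prev_index) |prev| {
            nodes[prev].next_index = node.next_index;
        }
        if (node.next_index) |next| {
            nodes[next].prev_index = node.prev_index;
        }
        nodes[index].prev_index = null;
        nodes[index].next_index = null;
    }

    pub fn main() void {
        var nodes = [_]Node{
            .{ .prev_index = null, .next_index = 1 },
            .{ .prev_index = 0, .next_index = 2 },
            .{ .prev_index = 1, .next_index = null },
        };
        unlink(&nodes, 1);
        std.debug.print("0.next = {?d}\n", .{nodes[0].next_index}); // 2
    }
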
-fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr_index: u16) void {
+fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = text_block;
+ _ = atom_index;
_ = new_block_size;
- _ = phdr_index;
}
-fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const sym = self.local_symbols.items[text_block.local_sym_index];
+fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
- const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
+ const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value;
- return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
+ return self.allocateAtom(atom_index, new_block_size, alignment);
}
-fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+pub fn createAtom(self: *Elf) !Atom.Index {
+ const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const local_sym_index = try self.allocateLocalSymbol();
+ const offset_table_index = try self.allocateGotOffset();
+ try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
+ atom.* = .{
+ .local_sym_index = local_sym_index,
+ .offset_table_index = offset_table_index,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ local_sym_index, atom_index });
+ return atom_index;
+}
+
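
`createAtom` hands out a `u32` handle (`Atom.Index`) instead of a `*Atom`; since the atoms live in a growable array, only indices remain stable across appends, and `getAtom`/`getAtomPtr` re-derive values or pointers at each use. A reduced sketch of the handle pattern with a toy `Atom`:

    const std = @import("std");

    const Atom = struct { size: u64 };
    const Index = u32;

    const Linker = struct {
        gpa: std.mem.Allocator,
        atoms: std.ArrayListUnmanaged(Atom) = .{},

        fn createAtom(self: *Linker) !Index {
            const index = @intCast(Index, self.atoms.items.len);
            const atom = try self.atoms.addOne(self.gpa);
            atom.* = .{ .size = 0 };
            return index;
        }

        fn getAtomPtr(self: *Linker, index: Index) *Atom {
            // Pointers are re-derived at each use, never cached across appends.
            return &self.atoms.items[index];
        }
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        var linker = Linker{ .gpa = gpa_state.allocator() };
        defer linker.atoms.deinit(linker.gpa);

        const index = try linker.createAtom();
        linker.getAtomPtr(index).size = 16;
        std.debug.print("atom {d} size = {d}\n", .{ index, linker.getAtomPtr(index).size });
    }
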
+fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
+ const phdr_index = self.sections.items(.phdr_index)[sym.st_shndx];
const phdr = &self.program_headers.items[phdr_index];
- const shdr = &self.sections.items[shdr_index];
- const new_block_ideal_capacity = padToIdeal(new_block_size);
+ const shdr = &self.sections.items(.shdr)[sym.st_shndx];
+ const free_list = &self.sections.items(.free_list)[sym.st_shndx];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sym.st_shndx];
+ const new_atom_ideal_capacity = padToIdeal(new_block_size);
- // We use these to indicate our intention to update metadata, placing the new block,
+ // We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
- var free_list = self.atom_free_lists.get(phdr_index).?;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_block = free_list.items[i];
- // We now have a pointer to a live text block that has too much capacity.
- // Is it enough that we could fit this new text block?
- const sym = self.local_symbols.items[big_block.local_sym_index];
- const capacity = big_block.capacity(self.*);
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
+ // We now have a pointer to a live atom that has too much capacity.
+ // Is it enough that we could fit this new atom?
+ const big_atom_sym = big_atom.getSymbol(self);
+ const capacity = big_atom.capacity(self);
const ideal_capacity = padToIdeal(capacity);
- const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = sym.st_value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
+ const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = big_atom_sym.st_value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
- if (!big_block.freeListEligible(self.*)) {
+ if (!big_atom.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
@@ -2201,29 +2184,33 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = big_block;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (self.atoms.get(phdr_index)) |last| {
- const sym = self.local_symbols.items[last.local_sym_index];
- const ideal_capacity = padToIdeal(sym.st_size);
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
+ const last_sym = last.getSymbol(self);
+ const ideal_capacity = padToIdeal(last_sym.st_size);
+ const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
}
};
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
+ if (expand_section) {
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- try self.growAllocSection(shdr_index, phdr_index, needed_size);
- _ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
+ try self.growAllocSection(sym.st_shndx, needed_size);
+ maybe_last_atom_index.* = atom_index;
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
@@ -2238,23 +2225,28 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
- // This function can also reallocate a text block.
+ // This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- text_block.prev = null;
- text_block.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -2262,7 +2254,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
return vaddr;
}
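
`allocateAtom` first scans the free list for an atom whose surplus capacity can host the new atom at its padded ("ideal") size, carving the placement off the end of that capacity and aligning it down. A hedged sketch of the placement arithmetic; the `ideal_factor` value is an assumption for illustration, not taken from this diff:

    const std = @import("std");
    const mem = std.mem;

    const ideal_factor = 3; // assumed value, for illustration only

    fn padToIdeal(actual_size: u64) u64 {
        return actual_size +| (actual_size / ideal_factor);
    }

    pub fn main() void {
        const big_atom_vaddr: u64 = 0x1000;
        const capacity: u64 = 0x400; // surplus space behind the big atom
        const new_size: u64 = 0x120;
        const alignment: u64 = 16;

        // Place the new atom at the end of the capacity, at its ideal size,
        // aligned down to the requested alignment.
        const new_ideal = padToIdeal(new_size);
        const capacity_end = big_atom_vaddr + capacity;
        const unaligned = capacity_end - new_ideal;
        const new_vaddr = mem.alignBackwardGeneric(u64, unaligned, alignment);
        std.debug.print("place new atom at 0x{x}\n", .{new_vaddr}); // 0x1280
    }
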
-fn allocateLocalSymbol(self: *Elf) !u32 {
+pub fn allocateLocalSymbol(self: *Elf) !u32 {
try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
const index = blk: {
@@ -2289,40 +2281,30 @@ fn allocateLocalSymbol(self: *Elf) !u32 {
return index;
}
-pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void {
- if (self.llvm_object) |_| return;
-
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- if (decl.link.elf.local_sym_index != 0) return;
-
+pub fn allocateGotOffset(self: *Elf) !u32 {
try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
- try self.decls.putNoClobber(self.base.allocator, decl_index, null);
- const decl_name = try decl.getFullyQualifiedName(mod);
- defer self.base.allocator.free(decl_name);
-
- log.debug("allocating symbol indexes for {s}", .{decl_name});
- decl.link.elf.local_sym_index = try self.allocateLocalSymbol();
- try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.elf.local_sym_index, &decl.link.elf);
+ const index = blk: {
+ if (self.offset_table_free_list.popOrNull()) |index| {
+ log.debug(" (reusing GOT offset at index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating GOT offset at index {d})", .{self.offset_table.items.len});
+ const index = @intCast(u32, self.offset_table.items.len);
+ _ = self.offset_table.addOneAssumeCapacity();
+ self.offset_table_count_dirty = true;
+ break :blk index;
+ }
+ };
- if (self.offset_table_free_list.popOrNull()) |i| {
- decl.link.elf.offset_table_index = i;
- } else {
- decl.link.elf.offset_table_index = @intCast(u32, self.offset_table.items.len);
- _ = self.offset_table.addOneAssumeCapacity();
- self.offset_table_count_dirty = true;
- }
- self.offset_table.items[decl.link.elf.offset_table_index] = 0;
+ self.offset_table.items[index] = 0;
+ return index;
}
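
`allocateLocalSymbol` and the new `allocateGotOffset` share a reserve-then-reuse idiom: pop an index from a free list when possible, otherwise grow the table by one slot. A generic sketch of that idiom (dirty-flag bookkeeping omitted):

    const std = @import("std");

    fn allocateIndex(
        gpa: std.mem.Allocator,
        table: *std.ArrayListUnmanaged(u64),
        free_list: *std.ArrayListUnmanaged(u32),
    ) !u32 {
        // Reserve up front so the function cannot fail after handing out a slot.
        try table.ensureUnusedCapacity(gpa, 1);
        if (free_list.popOrNull()) |index| {
            return index; // reuse a slot freed earlier
        }
        const index = @intCast(u32, table.items.len);
        _ = table.addOneAssumeCapacity();
        return index;
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_state.allocator();
        var table = std.ArrayListUnmanaged(u64){};
        defer table.deinit(gpa);
        var free_list = std.ArrayListUnmanaged(u32){};
        defer free_list.deinit(gpa);

        const a = try allocateIndex(gpa, &table, &free_list);
        try free_list.append(gpa, a); // free it again
        const b = try allocateIndex(gpa, &table, &free_list);
        std.debug.print("a = {d}, b = {d} (reused)\n", .{ a, b });
    }
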
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
- self.freeTextBlock(atom, self.phdr_load_ro_index.?);
- self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
- self.local_symbols.items[atom.local_sym_index].st_info = 0;
- _ = self.atom_by_index_table.remove(atom.local_sym_index);
+ self.freeAtom(atom);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2335,52 +2317,59 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const kv = self.decls.fetchRemove(decl_index);
- if (kv.?.value) |index| {
- self.freeTextBlock(&decl.link.elf, index);
+ log.debug("freeDecl {*}", .{decl});
+
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
- // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- if (decl.link.elf.local_sym_index != 0) {
- self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
- self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
- _ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index);
- decl.link.elf.local_sym_index = 0;
-
- self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
+ if (self.dwarf) |*dw| {
+ dw.freeDecl(decl_index);
}
+}
- if (self.dwarf) |*dw| {
- dw.freeDecl(decl);
+pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .shdr = self.getDeclShdrIndex(decl_index),
+ .exports = .{},
+ };
}
+ return gop.value_ptr.atom;
}
-fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
+fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
- const phdr_index: u16 = blk: {
+ const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
// TODO in release-fast and release-small, we should put undef in .bss
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
- .Fn => break :blk self.phdr_load_re_index.?,
+ .Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
- break :blk self.phdr_load_ro_index.?;
+ break :blk self.rodata_section_index.?;
},
}
};
- return phdr_index;
+ return shdr_index;
}
fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
+ const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
@@ -2390,61 +2379,65 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = decl_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
- assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
- const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index];
- if (local_sym.st_size != 0) {
- const capacity = decl.link.elf.capacity(self.*);
+ const shdr_index = decl_metadata.shdr;
+ if (atom.getSymbol(self).st_size != 0) {
+ const local_sym = atom.getSymbolPtr(self);
+ local_sym.st_name = try self.shstrtab.insert(gpa, decl_name);
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+
+ const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
+
if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
+ const vaddr = try self.growAtom(atom_index, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link.elf, code.len, phdr_index);
+ self.shrinkAtom(atom_index, code.len);
}
local_sym.st_size = code.len;
- local_sym.st_name = try self.updateString(local_sym.st_name, decl_name);
- local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
- local_sym.st_other = 0;
- local_sym.st_shndx = shdr_index;
+
// TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.elf.local_sym_index);
+ try self.writeSymbol(atom.getSymbolIndex().?);
} else {
- const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
- log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
-
+ const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
- .st_name = name_str_index,
+ .st_name = try self.shstrtab.insert(gpa, decl_name),
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
+ .st_value = 0,
+ .st_size = 0,
};
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
+ const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+ log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
+
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ local_sym.st_value = vaddr;
+ local_sym.st_size = code.len;
- try self.writeSymbol(decl.link.elf.local_sym_index);
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
+ const local_sym = atom.getSymbolPtr(self);
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return local_sym;
@@ -2461,12 +2454,15 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const tracy = trace(@src());
defer tracy.end();
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
- defer code_buffer.deinit();
-
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
+
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
@@ -2479,7 +2475,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -2525,7 +2521,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
}
- assert(!self.unnamed_const_atoms.contains(decl_index));
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2542,19 +2540,18 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.elf.local_sym_index,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.elf.local_sym_index,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -2579,47 +2576,38 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ const gpa = self.base.allocator;
+
+ var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
+ const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
- const atom = try self.base.allocator.create(TextBlock);
- errdefer self.base.allocator.destroy(atom);
- atom.* = TextBlock.empty;
- try self.managed_atoms.append(self.base.allocator, atom);
-
+ const decl = mod.declPtr(decl_index);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
- defer self.base.allocator.free(decl_name);
-
+ defer gpa.free(decl_name);
const index = unnamed_consts.items.len;
- const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index });
- defer self.base.allocator.free(name);
-
- break :blk try self.makeString(name);
+ const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
+ defer gpa.free(name);
+ break :blk try self.shstrtab.insert(gpa, name);
};
- const name = self.getString(name_str_index);
+ const name = self.shstrtab.get(name_str_index).?;
- log.debug("allocating symbol indexes for {s}", .{name});
- atom.local_sym_index = try self.allocateLocalSymbol();
- try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom);
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
}, .{
- .parent_atom_index = atom.local_sym_index,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
@@ -2629,31 +2617,27 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
- const phdr_index = self.phdr_load_ro_index.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
- const vaddr = try self.allocateTextBlock(atom, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(atom, phdr_index);
-
- log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
-
- const local_sym = &self.local_symbols.items[atom.local_sym_index];
- local_sym.* = .{
- .st_name = name_str_index,
- .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
- .st_other = 0,
- .st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
- };
-
- try self.writeSymbol(atom.local_sym_index);
- try unnamed_consts.append(self.base.allocator, atom);
+ const shdr_index = self.rodata_section_index.?;
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
+ const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
+ local_sym.st_name = name_str_index;
+ local_sym.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+ local_sym.st_size = code.len;
+ local_sym.st_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+
+ log.debug("allocated text block for {s} at 0x{x}", .{ name, local_sym.st_value });
+
+ try self.writeSymbol(self.getAtom(atom_index).getSymbolIndex().?);
+ try unnamed_consts.append(gpa, atom_index);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- return atom.local_sym_index;
+ return self.getAtom(atom_index).getSymbolIndex().?;
}
pub fn updateDeclExports(
@@ -2672,17 +2656,16 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
- try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
+ const gpa = self.base.allocator;
+
const decl = module.declPtr(decl_index);
- if (decl.link.elf.local_sym_index == 0) return;
- const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
+ const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
+ const shdr_index = decl_metadata.shdr;
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = decl_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ try self.global_symbols.ensureUnusedCapacity(gpa, exports.len);
for (exports) |exp| {
if (exp.options.section) |section_name| {
@@ -2715,10 +2698,10 @@ pub fn updateDeclExports(
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
- if (exp.link.elf.sym_index) |i| {
+ if (decl_metadata.getExport(self, exp.options.name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
- .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
@@ -2726,30 +2709,29 @@ pub fn updateDeclExports(
.st_size = decl_sym.st_size,
};
} else {
- const name = try self.makeString(exp.options.name);
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
+ try decl_metadata.exports.append(gpa, @intCast(u32, i));
self.global_symbols.items[i] = .{
- .st_name = name,
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
};
-
- exp.link.elf.sym_index = @intCast(u32, i);
}
}
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
@@ -2757,16 +2739,18 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
if (self.llvm_object) |_| return;
if (self.dwarf) |*dw| {
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-pub fn deleteExport(self: *Elf, exp: Export) void {
+pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
-
- const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
- self.global_symbols.items[sym_index].st_info = 0;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
+ log.debug("deleting export '{s}'", .{name});
+ self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {};
+ self.global_symbols.items[sym_index.*].st_info = 0;
+ sym_index.* = 0;
}
fn writeProgHeader(self: *Elf, index: usize) !void {
@@ -2795,7 +2779,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var shdr: [1]elf.Elf32_Shdr = undefined;
- shdr[0] = sectHeaderTo32(self.sections.items[index]);
+ shdr[0] = sectHeaderTo32(self.sections.items(.shdr)[index]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
@@ -2803,7 +2787,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
.p64 => {
- var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
+ var shdr = [1]elf.Elf64_Shdr{self.sections.items(.shdr)[index]};
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
@@ -2817,11 +2801,11 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
const needed_size = self.offset_table.items.len * entry_size;
- try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
+ try self.growAllocSection(self.got_section_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
- const shdr = &self.sections.items[self.got_section_index.?];
+ const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2847,7 +2831,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
const tracy = trace(@src());
defer tracy.end();
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
// Make sure we are not pointlessly writing symbol data that will have to get relocated
// due to running out of space.
if (self.local_symbols.items.len != syms_sect.sh_info) {
@@ -2869,7 +2853,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index,
};
const local = self.local_symbols.items[index];
- log.debug("writing symbol {d}, '{s}' at 0x{x}", .{ index, self.getString(local.st_name), off });
+ log.debug("writing symbol {d}, '{?s}' at 0x{x}", .{ index, self.shstrtab.get(local.st_name), off });
log.debug(" ({})", .{local});
switch (self.ptr_width) {
.p32 => {
@@ -2899,7 +2883,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
}
fn writeAllGlobalSymbols(self: *Elf) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
@@ -3042,7 +3026,7 @@ fn getLDMOption(target: std.Target) ?[]const u8 {
}
}
-fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
+pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
@@ -3249,10 +3233,58 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
for (self.local_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
for (self.global_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
+
+pub fn getProgramHeader(self: *const Elf, shdr_index: u16) elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return self.program_headers.items[index];
+}
+
+pub fn getProgramHeaderPtr(self: *Elf, shdr_index: u16) *elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return &self.program_headers.items[index];
+}
+
+/// Returns a pointer to the symbol at sym_index.
+pub fn getSymbolPtr(self: *Elf, sym_index: u32) *elf.Elf64_Sym {
+ return &self.local_symbols.items[sym_index];
+}
+
+/// Returns symbol at sym_index.
+pub fn getSymbol(self: *const Elf, sym_index: u32) elf.Elf64_Sym {
+ return self.local_symbols.items[sym_index];
+}
+
+/// Returns name of the symbol at sym_index.
+pub fn getSymbolName(self: *const Elf, sym_index: u32) []const u8 {
+ const sym = self.local_symbols.items[sym_index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+/// Returns name of the global symbol at index.
+pub fn getGlobalName(self: *const Elf, index: u32) []const u8 {
+ const sym = self.global_symbols.items[index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+pub fn getAtom(self: *const Elf, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Elf, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+/// Returns the atom index if the symbol references an atom.
+/// Returns null if no atom is tracked for this symbol.
+pub fn getAtomIndexForSymbol(self: *Elf, sym_index: u32) ?Atom.Index {
+ return self.atom_by_index_table.get(sym_index);
+}
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
new file mode 100644
index 0000000000..4ab304ef71
--- /dev/null
+++ b/src/link/Elf/Atom.zig
@@ -0,0 +1,100 @@
+const Atom = @This();
+
+const std = @import("std");
+const assert = std.debug.assert;
+const elf = std.elf;
+
+const Elf = @import("../Elf.zig");
+
+/// Each decl always gets a local symbol with the fully qualified name.
+/// The vaddr and size are found here directly.
+/// The file offset is found by computing the vaddr offset from the section vaddr
+/// the symbol references, and adding that to the file offset of the section.
+/// If this field is 0, it means the codegen size = 0 and there is no symbol or
+/// offset table entry.
+local_sym_index: u32,
+
+/// This field is undefined for symbols with size = 0.
+offset_table_index: u32,
+
+/// Points to the previous and next neighbors, ordered by virtual address.
+/// This can be used to find, for example, the capacity of this `Atom`.
+prev_index: ?Index,
+next_index: ?Index,
+
+pub const Index = u32;
+
+pub const Reloc = struct {
+ target: u32,
+ offset: u64,
+ addend: u32,
+ prev_vaddr: u64,
+};
+
+pub fn getSymbolIndex(self: Atom) ?u32 {
+ if (self.local_sym_index == 0) return null;
+ return self.local_sym_index;
+}
+
+pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
+ return elf_file.getSymbol(self.getSymbolIndex().?);
+}
+
+pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
+ return elf_file.getSymbolPtr(self.getSymbolIndex().?);
+}
+
+pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
+ return elf_file.getSymbolName(self.getSymbolIndex().?);
+}
+
+pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
+ assert(self.getSymbolIndex() != null);
+ const target = elf_file.base.options.target;
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+ const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
+ return got.p_vaddr + self.offset_table_index * ptr_bytes;
+}
+
+/// Returns how much room there is to grow in virtual address space.
+/// File offset relocation happens transparently, so it is not included in
+/// this calculation.
+pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
+ const self_sym = self.getSymbol(elf_file);
+ if (self.next_index) |next_index| {
+ const next = elf_file.getAtom(next_index);
+ const next_sym = next.getSymbol(elf_file);
+ return next_sym.st_value - self_sym.st_value;
+ } else {
+ // We are the last atom. The capacity is limited only by virtual address space.
+ return std.math.maxInt(u32) - self_sym.st_value;
+ }
+}
+
+pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
+ // No need to keep a free list node for the last atom.
+ const next_index = self.next_index orelse return false;
+ const next = elf_file.getAtom(next_index);
+ const self_sym = self.getSymbol(elf_file);
+ const next_sym = next.getSymbol(elf_file);
+ const cap = next_sym.st_value - self_sym.st_value;
+ const ideal_cap = Elf.padToIdeal(self_sym.st_size);
+ if (cap <= ideal_cap) return false;
+ const surplus = cap - ideal_cap;
+ return surplus >= Elf.min_text_capacity;
+}
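
`capacity` is the distance to the next atom's symbol value (or to the end of a 32-bit address space for the last atom), and `freeListEligible` requires the surplus over the ideal padded size to reach `min_text_capacity`. A worked numeric example of the eligibility test; the concrete values are assumptions:

    const std = @import("std");

    pub fn main() void {
        // Assumed values for illustration; Elf.min_text_capacity is not shown here.
        const min_text_capacity: u64 = 64;
        const self_value: u64 = 0x1000; // st_value of this atom
        const next_value: u64 = 0x1200; // st_value of the next atom
        const ideal_cap: u64 = 170; // padToIdeal(st_size) of this atom

        const cap = next_value - self_value; // 512 bytes until the neighbor
        const eligible = cap > ideal_cap and (cap - ideal_cap) >= min_text_capacity;
        std.debug.print("eligible = {}\n", .{eligible}); // true: 342 bytes of surplus
    }
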
+
+pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
+ const gpa = elf_file.base.allocator;
+ const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ try gop.value_ptr.append(gpa, reloc);
+}
+
+pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
+ var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
+}
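
Relocations are keyed by `Atom.Index` in a map of unmanaged lists: `addRelocation` uses the get-or-put idiom to create a list on first use, and `freeRelocations` detaches the list with `fetchRemove` before freeing it. A reduced sketch of the same idiom:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_state.allocator();

        var relocs = std.AutoHashMapUnmanaged(u32, std.ArrayListUnmanaged(u64)){};
        defer {
            var it = relocs.valueIterator();
            while (it.next()) |list| list.deinit(gpa);
            relocs.deinit(gpa);
        }

        const atom_index: u32 = 7;

        // addRelocation: create the per-atom list on first use, then append.
        const gop = try relocs.getOrPut(gpa, atom_index);
        if (!gop.found_existing) gop.value_ptr.* = .{};
        try gop.value_ptr.append(gpa, 0xdead_beef);

        // freeRelocations: detach the list from the map, then free it.
        var removed = relocs.fetchRemove(atom_index);
        if (removed) |*kv| kv.value.deinit(gpa);
    }
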
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index e6924a6717..24ef275c5b 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -66,7 +66,7 @@ const Section = struct {
// TODO is null here necessary, or can we do away with tracking via section
// size in incremental context?
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -83,7 +83,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
base: File,
@@ -140,8 +140,8 @@ locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
-dyld_private_atom: ?*Atom = null,
-stub_helper_preamble_atom: ?*Atom = null,
+dyld_private_atom_index: ?Atom.Index = null,
+stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
@@ -164,10 +164,10 @@ segment_table_dirty: bool = false,
cold_start: bool = true,
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -210,11 +210,36 @@ bindings: BindingTable = .{},
/// this will be a table indexed by index into the list of Atoms.
lazy_bindings: BindingTable = .{},
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?u8) = .{},
+/// Table of tracked Decls.
+decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u8,
+ /// A list of all exports aliases of this Decl.
+ /// TODO do we actually need this at all?
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, macho_file: *const MachO, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, macho_file: *MachO, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+};
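
`DeclMetadata` resolves an export by linearly scanning its `exports` symbol indices and comparing names through the linker's symbol table; for the handful of exports a `Decl` typically has, that beats maintaining a map. A reduced sketch with the name lookup stubbed out and all values hypothetical:

    const std = @import("std");
    const mem = std.mem;

    // Stand-in for the linker's symbol-name lookup.
    fn getSymbolName(names: []const []const u8, sym_index: u32) []const u8 {
        return names[sym_index];
    }

    fn getExport(exports: []const u32, names: []const []const u8, name: []const u8) ?u32 {
        for (exports) |exp| {
            if (mem.eql(u8, name, getSymbolName(names, exp))) return exp;
        }
        return null;
    }

    pub fn main() void {
        const names = [_][]const u8{ "_start", "main", "foo" };
        const exports = [_]u32{ 1, 2 };
        std.debug.print("{?d}\n", .{getExport(&exports, &names, "foo")}); // 2
    }
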
const Entry = struct {
target: SymbolWithLoc,
@@ -229,8 +254,8 @@ const Entry = struct {
return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index, .file = null });
}
- pub fn getAtom(entry: Entry, macho_file: *MachO) ?*Atom {
- return macho_file.getAtomForSymbol(.{ .sym_index = entry.sym_index, .file = null });
+ pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
+ return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index, .file = null });
}
pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
@@ -238,10 +263,10 @@ const Entry = struct {
}
};
-const BindingTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Atom.Binding));
-const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
-const RebaseTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const RelocationTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
+const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
+const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
+const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const RelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@@ -286,10 +311,6 @@ pub const default_pagezero_vmsize: u64 = 0x100000000;
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.target.ofmt == .macho);
@@ -547,8 +568,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.allocateSpecialSymbols();
- for (self.relocs.keys()) |atom| {
- try atom.resolveRelocations(self);
+ for (self.relocs.keys()) |atom_index| {
+ try Atom.resolveRelocations(self, atom_index);
}
if (build_options.enable_logging) {
@@ -999,18 +1020,19 @@ pub fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs:
}
}
-pub fn writeAtom(self: *MachO, atom: *Atom, code: []const u8) !void {
+pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(sym.n_sect - 1);
const file_offset = section.header.offset + sym.n_value - section.header.addr;
log.debug("writing atom for symbol {s} at file offset 0x{x}", .{ atom.getName(self), file_offset });
try self.base.file.?.pwriteAll(code, file_offset);
- try atom.resolveRelocations(self);
+ try Atom.resolveRelocations(self, atom_index);
}
-fn writePtrWidthAtom(self: *MachO, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
}
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
@@ -1026,7 +1048,8 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.n_value < addr) continue;
reloc.dirty = true;
@@ -1053,31 +1076,38 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
}
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
-
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = @sizeOf(u64);
- atom.alignment = @alignOf(u64);
- break :blk atom;
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
};
- errdefer gpa.destroy(atom);
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
+pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = @sizeOf(u64);
+ atom.alignment = @alignOf(u64);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
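
createAtom also establishes the access discipline used throughout the rest of this diff: hold on to the index, and re-fetch a pointer through getAtomPtr whenever mutation is needed, because any previously obtained *Atom may be invalidated once the backing array grows. A self-contained sketch of that discipline; Linker here is a hypothetical stand-in, not the real MachO struct:

const std = @import("std");

const Atom = struct { size: u64 = 0, alignment: u32 = 0 };

const Linker = struct {
    gpa: std.mem.Allocator,
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    fn createAtom(self: *Linker) !u32 {
        const index = @intCast(u32, self.atoms.items.len);
        const atom = try self.atoms.addOne(self.gpa); // may move all atoms
        atom.* = .{};
        return index;
    }

    fn getAtomPtr(self: *Linker, index: u32) *Atom {
        return &self.atoms.items[index];
    }
};

test "re-fetch atom pointers through the index" {
    var linker = Linker{ .gpa = std.testing.allocator };
    defer linker.atoms.deinit(linker.gpa);

    const first = try linker.createAtom();
    _ = try linker.createAtom(); // could invalidate any *Atom held so far
    linker.getAtomPtr(first).size = @sizeOf(u64); // safe: looked up by index
    try std.testing.expectEqual(@as(u64, 8), linker.getAtomPtr(first).size);
}
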
@@ -1092,50 +1122,39 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- try atom.addBinding(self, .{
+ try Atom.addBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
} else {
- try atom.addRebase(self, 0);
+ try Atom.addRebase(self, atom_index, 0);
}
- return atom;
+ return atom_index;
}
pub fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.dyld_private_atom != null) return;
-
- const gpa = self.base.allocator;
+ if (self.dyld_private_atom_index != null) return;
- const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = @sizeOf(u64);
- atom.alignment = @alignOf(u64);
- break :blk atom;
- };
- errdefer gpa.destroy(atom);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = @sizeOf(u64);
+ atom.alignment = @alignOf(u64);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.data_section_index.? + 1;
- self.dyld_private_atom = atom;
+ self.dyld_private_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
}
pub fn createStubHelperPreambleAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.stub_helper_preamble_atom != null) return;
+ if (self.stub_helper_preamble_atom_index != null) return;
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
@@ -1144,26 +1163,23 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = size;
- atom.alignment = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable,
- };
- break :blk atom;
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = size;
+ atom.alignment = switch (arch) {
+ .x86_64 => 1,
+ .aarch64 => @alignOf(u32),
+ else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
- const dyld_private_sym_index = self.dyld_private_atom.?.sym_index;
+ const dyld_private_sym_index = if (self.dyld_private_atom_index) |dyld_index|
+ self.getAtom(dyld_index).getSymbolIndex().?
+ else
+ unreachable;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@@ -1182,7 +1198,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
code[9] = 0xff;
code[10] = 0x25;
- try atom.addRelocations(self, 2, .{ .{
+ try Atom.addRelocations(self, atom_index, 2, .{ .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 3,
@@ -1222,7 +1238,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
// br x16
mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 4, .{ .{
+ try Atom.addRelocations(self, atom_index, 4, .{ .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 0,
@@ -1255,17 +1271,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
else => unreachable,
}
- self.stub_helper_preamble_atom = atom;
-
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
+ self.stub_helper_preamble_atom_index = atom_index;
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
}
-pub fn createStubHelperAtom(self: *MachO) !*Atom {
+pub fn createStubHelperAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1273,20 +1286,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
- const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = size;
- atom.alignment = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable,
- };
- break :blk atom;
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = size;
+ atom.alignment = switch (arch) {
+ .x86_64 => 1,
+ .aarch64 => @alignOf(u32),
+ else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1296,6 +1303,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
defer gpa.free(code);
mem.set(u8, code, 0);
+ const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
+ self.getAtom(stub_index).getSymbolIndex().?
+ else
+ unreachable;
+
switch (arch) {
.x86_64 => {
// pushq
@@ -1304,9 +1316,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
// jmpq
code[5] = 0xe9;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@@ -1327,9 +1339,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
// Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@@ -1339,34 +1351,24 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
-pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
- const gpa = self.base.allocator;
- const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = @sizeOf(u64);
- atom.alignment = @alignOf(u64);
- break :blk atom;
- };
- errdefer gpa.destroy(atom);
+pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = @sizeOf(u64);
+ atom.alignment = @alignOf(u64);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1378,23 +1380,20 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, 0);
- try atom.addLazyBinding(self, .{
+ try Atom.addRebase(self, atom_index, 0);
+ try Atom.addLazyBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
- return atom;
+ return atom_index;
}
-pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
+pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1402,21 +1401,15 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const sym_index = try self.allocateSymbol();
- const atom = blk: {
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = sym_index;
- atom.size = size;
- atom.alignment = switch (arch) {
- .x86_64 => 1,
- .aarch64 => @alignOf(u32),
- else => unreachable, // unhandled architecture type
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
+ atom.size = size;
+ atom.alignment = switch (arch) {
+ .x86_64 => 1,
+ .aarch64 => @alignOf(u32),
+ else => unreachable, // unhandled architecture type
- };
- break :blk atom;
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1432,7 +1425,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
code[0] = 0xff;
code[1] = 0x25;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = laptr_sym_index, .file = null },
.offset = 2,
@@ -1453,7 +1446,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 2, .{
+ try Atom.addRelocations(self, atom_index, 2, .{
.{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = laptr_sym_index, .file = null },
@@ -1475,14 +1468,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
@@ -1616,10 +1606,13 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.stubs_table.contains(global)) break :blk;
const stub_index = try self.allocateStubEntry(global);
- const stub_helper_atom = try self.createStubHelperAtom();
- const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.sym_index, global);
- const stub_atom = try self.createStubAtom(laptr_atom.sym_index);
- self.stubs.items[stub_index].sym_index = stub_atom.sym_index;
+ const stub_helper_atom_index = try self.createStubHelperAtom();
+ const stub_helper_atom = self.getAtom(stub_helper_atom_index);
+ const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
+ const laptr_atom = self.getAtom(laptr_atom_index);
+ const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_atom = self.getAtom(stub_atom_index);
+ self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@@ -1716,10 +1709,11 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
- const got_atom = try self.createGotAtom(global);
- self.got_entries.items[got_index].sym_index = got_atom.sym_index;
+ const got_atom_index = try self.createGotAtom(global);
+ const got_atom = self.getAtom(got_atom_index);
+ self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
pub fn deinit(self: *MachO) void {
@@ -1769,12 +1763,12 @@ pub fn deinit(self: *MachO) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
+ self.atoms.deinit(gpa);
if (self.base.options.module) |_| {
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
+ }
self.decls.deinit(gpa);
} else {
assert(self.decls.count() == 0);
@@ -1808,12 +1802,14 @@ pub fn deinit(self: *MachO) void {
self.lazy_bindings.deinit(gpa);
}
-fn freeAtom(self: *MachO, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
+fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
+ const gpa = self.base.allocator;
+ log.debug("freeAtom {d}", .{atom_index});
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
@@ -1821,69 +1817,94 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
+ // Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
+ const sym_index = atom.getSymbolIndex().?;
+
+ self.locals_free_list.append(gpa, sym_index) catch {};
+
+ // Try freeing GOT atom if this decl had one
+ const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ if (self.got_entries_table.get(got_target)) |got_index| {
+ self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
+ self.got_entries.items[got_index] = .{
+ .target = .{ .sym_index = 0, .file = null },
+ .sym_index = 0,
+ };
+ _ = self.got_entries_table.remove(got_target);
+
+ if (self.d_sym) |*d_sym| {
+ d_sym.swapRemoveRelocs(sym_index);
+ }
+
+ log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
+
+ self.locals.items[sym_index].n_type = 0;
+ _ = self.atom_by_index_table.remove(sym_index);
+ log.debug(" adding local symbol index {d} to free list", .{sym_index});
+ self.getAtomPtr(atom_index).sym_index = 0;
}
-fn shrinkAtom(self: *MachO, atom: *Atom, new_block_size: u64) void {
+fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
-fn allocateSymbol(self: *MachO) !u32 {
+pub fn allocateSymbol(self: *MachO) !u32 {
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
const index = blk: {
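
growAtom keeps its old policy under the new indexing: reallocate only when the current address is misaligned for the requested alignment, or when the new size exceeds the capacity up to the next atom. The decision in isolation, as a sketch with made-up numbers:

const std = @import("std");

/// Simplified form of the grow decision: keep the current placement
/// unless alignment or capacity force a reallocation.
fn needsRealloc(addr: u64, capacity: u64, new_size: u64, alignment: u64) bool {
    const align_ok = std.mem.alignBackwardGeneric(u64, addr, alignment) == addr;
    return !align_ok or new_size > capacity;
}

test "grow decision" {
    try std.testing.expect(!needsRealloc(0x1000, 64, 32, 8)); // fits in place
    try std.testing.expect(needsRealloc(0x1001, 64, 32, 8)); // misaligned
    try std.testing.expect(needsRealloc(0x1000, 64, 128, 8)); // too big
}
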
@@ -1975,16 +1996,6 @@ pub fn allocateStubEntry(self: *MachO, target: SymbolWithLoc) !u32 {
return index;
}
-pub fn allocateDeclIndexes(self: *MachO, decl_index: Module.Decl.Index) !void {
- if (self.llvm_object) |_| return;
- const decl = self.base.options.module.?.declPtr(decl_index);
- if (decl.link.macho.sym_index != 0) return;
-
- decl.link.macho.sym_index = try self.allocateSymbol();
- try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.sym_index, &decl.link.macho);
- try self.decls.putNoClobber(self.base.allocator, decl_index, null);
-}
-
pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -1997,8 +2008,12 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(&decl.link.macho);
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2017,7 +2032,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
const code = switch (res) {
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -2028,13 +2043,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2069,21 +2078,13 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
-
- atom.sym_index = try self.allocateSymbol();
-
- try self.managed_atoms.append(gpa, atom);
- try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
- .parent_atom_index = atom.sym_index,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -2093,26 +2094,27 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
- // const sect_id = self.getDeclOutputSection(decl);
+ // const sect_id = self.getDeclOutputSection(decl_index);
const sect_id = self.data_const_section_index.?;
const symbol = atom.getSymbolPtr(self);
symbol.n_strx = name_str_index;
symbol.n_type = macho.N_SECT;
symbol.n_sect = sect_id + 1;
- symbol.n_value = try self.allocateAtom(atom, code.len, required_alignment);
- errdefer self.freeAtom(atom);
+ symbol.n_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {?s} at 0x{x}", .{ name, symbol.n_value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom.sym_index;
+ return atom.getSymbolIndex().?;
}
pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -2137,7 +2139,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
- self.freeRelocationsForAtom(&decl.link.macho);
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2156,19 +2160,18 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.macho.sym_index,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.macho.sym_index,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -2178,13 +2181,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2192,7 +2189,20 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {
+pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
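
getOrCreateAtomForDecl replaces the deleted allocateDeclIndexes: instead of a mandatory up-front call, per-decl metadata is materialized lazily through getOrPut on first use, so every caller can assume it exists. The same idiom in miniature, with a hypothetical Metadata type:

const std = @import("std");

const Metadata = struct { atom: u32, section: u8 };

test "lazily create per-decl metadata with getOrPut" {
    const gpa = std.testing.allocator;
    var decls = std.AutoHashMapUnmanaged(u32, Metadata){};
    defer decls.deinit(gpa);

    const decl_index: u32 = 42;
    const gop = try decls.getOrPut(gpa, decl_index);
    if (!gop.found_existing) {
        // First time this decl is seen: create its atom and pick a section.
        gop.value_ptr.* = .{ .atom = 0, .section = 1 };
    }
    try std.testing.expect(decls.contains(decl_index));
}
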
@@ -2339,17 +2349,15 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const decl = mod.declPtr(decl_index);
const required_alignment = decl.getAlignment(self.base.options.target);
- assert(decl.link.macho.sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
- const atom = &decl.link.macho;
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_id = decl_ptr.*.?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
+ const sect_id = decl_metadata.section;
const code_len = code.len;
if (atom.size != 0) {
@@ -2359,31 +2367,31 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const capacity = decl.link.macho.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
if (vaddr != sym.n_value) {
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
- const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
- } else if (atom.next == null) {
+ self.shrinkAtom(atom_index, code_len);
+ } else if (atom.next_index == null) {
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
const needed_size = (sym.n_value + code_len) - segment.vmaddr;
header.size = needed_size;
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const name_str_index = try self.strtab.insert(gpa, sym_name);
const sym = atom.getSymbolPtr(self);
@@ -2392,32 +2400,32 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
- const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
+ const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
- self.got_entries.items[got_index].sym_index = got_atom.sym_index;
- try self.writePtrWidthAtom(got_atom);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
+ self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbol(self).n_value;
}
-pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
- _ = module;
+pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(decl);
+ try d_sym.dwarf.updateDeclLineNumber(module, decl_index);
}
}
@@ -2434,14 +2442,17 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
}
+
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- if (decl.link.macho.sym_index == 0) return;
- const decl_sym = decl.link.macho.getSymbol(self);
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
+ const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@@ -2479,9 +2490,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.macho.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.macho.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -2529,16 +2540,18 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *MachO, exp: Export) void {
+pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
+ const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
+ defer gpa.free(exp_name);
+ const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{exp_name});
assert(sym.sect() and sym.ext());
sym.* = .{
.n_strx = 0,
@@ -2547,9 +2560,9 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.n_desc = 0,
.n_value = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(exp_name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -2557,17 +2570,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.file = null,
};
}
-}
-fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchOrderedRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_rebases = self.rebases.fetchOrderedRemove(atom);
- if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
- var removed_bindings = self.bindings.fetchOrderedRemove(atom);
- if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
- var removed_lazy_bindings = self.lazy_bindings.fetchOrderedRemove(atom);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
+ sym_index.* = 0;
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
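
deleteDeclExport now receives the plain export name and re-applies the Mach-O convention itself: C-level symbols on Darwin carry a leading underscore, so the same "_{s}" formatting used by updateDeclExports recovers the symbol-table name. A sketch of just the mangling step:

const std = @import("std");

test "mach-o export name mangling" {
    const gpa = std.testing.allocator;
    const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{"main"});
    defer gpa.free(exp_name);
    try std.testing.expectEqualStrings("_main", exp_name);
}
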
@@ -2575,11 +2579,6 @@ fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeAtom(atom);
- self.locals_free_list.append(gpa, atom.sym_index) catch {};
- self.locals.items[atom.sym_index].n_type = 0;
- _ = self.atom_by_index_table.remove(atom.sym_index);
- log.debug(" adding local symbol index {d} to free list", .{atom.sym_index});
- atom.sym_index = 0;
}
unnamed_consts.clearAndFree(gpa);
}
@@ -2593,67 +2592,37 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- const kv = self.decls.fetchSwapRemove(decl_index);
- if (kv.?.value) |_| {
- self.freeAtom(&decl.link.macho);
+ if (self.decls.fetchSwapRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index);
- }
-
- // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- const gpa = self.base.allocator;
- const sym_index = decl.link.macho.sym_index;
- if (sym_index != 0) {
- self.locals_free_list.append(gpa, sym_index) catch {};
-
- // Try freeing GOT atom if this decl had one
- const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- if (self.got_entries_table.get(got_target)) |got_index| {
- self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
- self.got_entries.items[got_index] = .{
- .target = .{ .sym_index = 0, .file = null },
- .sym_index = 0,
- };
- _ = self.got_entries_table.remove(got_target);
-
- if (self.d_sym) |*d_sym| {
- d_sym.swapRemoveRelocs(sym_index);
- }
-
- log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
- }
-
- self.locals.items[sym_index].n_type = 0;
- _ = self.atom_by_index_table.remove(sym_index);
- log.debug(" adding local symbol index {d} to free list", .{sym_index});
- decl.link.macho.sym_index = 0;
+ kv.value.exports.deinit(self.base.allocator);
}
if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeDecl(decl);
+ d_sym.dwarf.freeDecl(decl_index);
}
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- assert(decl.link.macho.sym_index != 0);
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
else => unreachable,
},
- .target = .{ .sym_index = decl.link.macho.sym_index, .file = null },
+ .target = .{ .sym_index = sym_index, .file = null },
.offset = @intCast(u32, reloc_info.offset),
.addend = reloc_info.addend,
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -2885,34 +2854,36 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
- const maybe_last_atom = &self.sections.items(.last_atom)[index];
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[index];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.n_value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
}
}
-fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const segment = self.getSegmentPtr(sect_id);
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const requires_padding = blk: {
if (!header.isCode()) break :blk false;
if (header.isSymbolStubs()) break :blk false;
@@ -2926,7 +2897,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -2934,7 +2905,8 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -2962,30 +2934,35 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.offset);
const needed_size = (vaddr + new_atom_size) - segment.vmaddr;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
} else 0;
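
When no free-list node fits, the atom is appended after the last atom in the section: the new address is the first suitably aligned address past the previous atom's ideal (padded) capacity. The address arithmetic alone, with made-up numbers:

const std = @import("std");

/// Mirrors the `else if (maybe_last_atom_index.*)` branch above:
/// align the first address past the previous atom's padded capacity.
fn placeAfter(last_addr: u64, last_ideal_capacity: u64, alignment: u64) u64 {
    const end = last_addr + last_ideal_capacity;
    return std.mem.alignForwardGeneric(u64, end, alignment);
}

test "append placement" {
    // Previous atom at 0x1000 with 0x29 bytes of ideal capacity; the new
    // atom wants 16-byte alignment, so it lands at 0x1030, not 0x1029.
    try std.testing.expectEqual(@as(u64, 0x1030), placeAfter(0x1000, 0x29, 16));
}
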
@@ -3017,7 +2994,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
self.segment_table_dirty = true;
}
@@ -3026,21 +3003,31 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = @intCast(u32, alignment);
+ }
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -3180,8 +3167,9 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom, i| {
- log.debug(" ATOM(%{d}, '{s}')", .{ atom.sym_index, atom.getName(self) });
+ for (self.rebases.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
+ log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
const segment_index = slice.items(.segment_index)[sym.n_sect - 1];
@@ -3209,8 +3197,9 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom, i| {
- log.debug(" ATOM(%{d}, '{s}')", .{ atom.sym_index, atom.getName(self) });
+ for (raw_bindings.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
+ log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
const segment_index = slice.items(.segment_index)[sym.n_sect - 1];
@@ -3384,7 +3373,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom != null);
+ assert(self.stub_helper_preamble_atom_index != null);
const section = self.sections.get(stub_helper_section_index);
@@ -3394,10 +3383,11 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
else => unreachable,
};
const header = section.header;
- var atom = section.last_atom.?;
+ var atom_index = section.last_atom_index.?;
var index: usize = lazy_bind.offsets.items.len;
while (index > 0) : (index -= 1) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
const bind_offset = lazy_bind.offsets.items[index - 1];
@@ -3410,7 +3400,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
- atom = atom.prev.?;
+ atom_index = atom.prev_index.?;
}
}
@@ -3853,25 +3843,35 @@ pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResul
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *MachO, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *MachO, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns the atom index if there is an atom referenced by the symbol described by the `sym_with_loc` descriptor.
/// Returns null otherwise.
-pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_with_loc.file == null);
return self.atom_by_index_table.get(sym_with_loc.sym_index);
}
/// Returns the GOT atom index that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_with_loc) orelse return null;
- return self.got_entries.items[got_index].getAtom(self);
+ return self.got_entries.items[got_index].getAtomIndex(self);
}
/// Returns the stubs atom index that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getStubsAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getStubsAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const stubs_index = self.stubs_table.get(sym_with_loc) orelse return null;
- return self.stubs.items[stubs_index].getAtom(self);
+ return self.stubs.items[stubs_index].getAtomIndex(self);
}
/// Returns symbol location corresponding to the set entrypoint.
@@ -4257,30 +4257,35 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom)) |last, i| {
- var atom = last orelse continue;
+ for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
- while (atom.prev) |prev| {
- atom = prev;
+ while (true) {
+ const atom = self.getAtom(atom_index);
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
+ } else break;
}
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
- self.logAtom(atom);
- if (atom.next) |next| {
- atom = next;
+ self.logAtom(atom_index);
+ const atom = self.getAtom(atom_index);
+ if (atom.next_index) |next_index| {
+ atom_index = next_index;
} else break;
}
}
}
-pub fn logAtom(self: *MachO, atom: *const Atom) void {
+pub fn logAtom(self: *MachO, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
- log.debug(" ATOM(%{d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
- atom.sym_index,
+ log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
+ atom.getSymbolIndex(),
sym_name,
sym.n_value,
atom.size,
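
With prev/next pointers replaced by optional indices, every list walk becomes a loop that re-fetches the atom by index on each step, as logAtoms does above. A standalone sketch of the traversal:

const std = @import("std");

const Node = struct { value: u32, prev_index: ?u32, next_index: ?u32 };

fn countForward(nodes: []const Node, first: u32) u32 {
    var count: u32 = 0;
    var index = first;
    while (true) {
        count += 1;
        const node = nodes[index]; // re-fetch by index on every step
        index = node.next_index orelse break;
    }
    return count;
}

test "walk an index-linked list" {
    const nodes = [_]Node{
        .{ .value = 1, .prev_index = null, .next_index = 1 },
        .{ .value = 2, .prev_index = 0, .next_index = null },
    };
    try std.testing.expectEqual(@as(u32, 2), countForward(&nodes, 0));
}
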
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 47ef974cb1..5fb94b7c13 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
-const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
@@ -39,10 +38,11 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
-next: ?*Atom,
-prev: ?*Atom,
+/// TODO use the same trick as with symbols: reserve index 0 as null atom
+next_index: ?Index,
+prev_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
pub const Binding = struct {
target: SymbolWithLoc,
@@ -54,15 +54,10 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
+pub fn getSymbolIndex(self: Atom) ?u32 {
+ if (self.sym_index == 0) return null;
+ return self.sym_index;
+}
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, macho_file: *MachO) macho.nlist_64 {
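
The deleted `empty` constant relied on sym_index == 0 meaning "no symbol"; getSymbolIndex turns that implicit sentinel into an explicit optional, and the TODO above proposes the same trick for atom indices. The pattern in isolation:

const std = @import("std");

/// Index 0 is reserved as the null symbol, so it maps to `null`.
fn symbolIndex(raw: u32) ?u32 {
    if (raw == 0) return null;
    return raw;
}

test "sentinel zero becomes null" {
    try std.testing.expectEqual(@as(?u32, null), symbolIndex(0));
    try std.testing.expectEqual(@as(?u32, 7), symbolIndex(7));
}
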
@@ -71,20 +66,23 @@ pub fn getSymbol(self: Atom, macho_file: *MachO) macho.nlist_64 {
/// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, macho_file: *MachO) *macho.nlist_64 {
+ const sym_index = self.getSymbolIndex().?;
return macho_file.getSymbolPtr(.{
- .sym_index = self.sym_index,
+ .sym_index = sym_index,
.file = self.file,
});
}
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
- return .{ .sym_index = self.sym_index, .file = self.file };
+ const sym_index = self.getSymbolIndex().?;
+ return .{ .sym_index = sym_index, .file = self.file };
}
/// Returns the name of this atom.
pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
+ const sym_index = self.getSymbolIndex().?;
return macho_file.getSymbolName(.{
- .sym_index = self.sym_index,
+ .sym_index = sym_index,
.file = self.file,
});
}
@@ -94,7 +92,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@@ -106,7 +105,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@@ -116,19 +116,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
- return self.addRelocations(macho_file, 1, .{reloc});
+pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
+ return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
- self: *Atom,
macho_file: *MachO,
+ atom_index: Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
- const gop = try macho_file.relocs.getOrPut(gpa, self);
+ const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
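
capacity and freeListEligible above apply the same neighbor lookup to two policies: an atom's capacity is the gap up to the next atom's address, and a slot only earns a free-list node when the surplus over the atom's ideal (padded) size reaches min_text_capacity. Worked numbers, as a sketch; the threshold here is a stand-in, not MachO's constant:

const std = @import("std");

/// `ideal` stands for padToIdeal(atom.size); `threshold` for min_text_capacity.
fn eligible(cap: u64, ideal: u64, threshold: u64) bool {
    if (cap <= ideal) return false;
    return cap - ideal >= threshold;
}

test "free list eligibility" {
    // Next atom 0x100 bytes away, atom ideally needs 0x40: surplus 0xc0.
    try std.testing.expect(eligible(0x100, 0x40, 0x80));
    // Surplus below the threshold: not worth a free-list node.
    try std.testing.expect(!eligible(0x100, 0x40, 0x100));
}
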
@@ -142,56 +142,72 @@ pub fn addRelocations(
}
}
-pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding rebase at offset 0x{x} in %{d})", .{ offset, self.sym_index });
- const gop = try macho_file.rebases.getOrPut(gpa, self);
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
+ const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{d})", .{
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.sym_index,
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.bindings.getOrPut(gpa, self);
+ const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{d})", .{
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.sym_index,
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
+ const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
- const relocs = macho_file.relocs.get(self) orelse return;
- const source_sym = self.getSymbol(macho_file);
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
+ const atom = macho_file.getAtom(atom_index);
+ const relocs = macho_file.relocs.get(atom_index) orelse return;
+ const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
- log.debug("relocating '{s}'", .{self.getName(macho_file)});
+ log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(self, macho_file, file_offset);
+ try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
+
+pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
+ const gpa = macho_file.base.allocator;
+ var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
+ if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
+ var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
+ if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
+ var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
+}
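
freeRelocations closes the loop on the index-keyed side tables: relocs, rebases, bindings, and lazy bindings are all drained with the same fetchOrderedRemove-then-deinit step when an atom is freed. A sketch of that step on one such table:

const std = @import("std");

test "drain a per-atom side table" {
    const gpa = std.testing.allocator;
    var relocs = std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(u32)){};
    defer relocs.deinit(gpa);

    const atom_index: u32 = 3;
    const gop = try relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) gop.value_ptr.* = .{};
    try gop.value_ptr.append(gpa, 0xdead);

    // Remove the entry and free its list in one step, as freeRelocations does.
    var removed = relocs.fetchOrderedRemove(atom_index);
    if (removed) |*entry| entry.value.deinit(gpa);
    try std.testing.expect(!relocs.contains(atom_index));
}
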
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 7c22f441cd..0a5c8b0372 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.debug_str_section_index == null) {
- assert(self.dwarf.strtab.items.len == 0);
- try self.dwarf.strtab.append(self.allocator, 0);
+ assert(self.dwarf.strtab.buffer.items.len == 0);
+ try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.items.len),
+ @intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
- if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
+ if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
+ const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
- try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
+ try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
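The `strtab.items` to `strtab.buffer.items` renames follow from Dwarf.zig now wrapping its string table in a struct whose raw bytes live in a `buffer` field. A rough sketch of that shape; only the `buffer` field is visible in this hunk, so the `put` API here is an assumption for illustration:

const std = @import("std");

const StringTable = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},

    // appends `bytes` plus a NUL terminator and returns the offset
    // where the new entry starts
    fn put(self: *StringTable, gpa: std.mem.Allocator, bytes: []const u8) !u32 {
        const offset = @intCast(u32, self.buffer.items.len);
        try self.buffer.appendSlice(gpa, bytes);
        try self.buffer.append(gpa, 0);
        return offset;
    }
};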
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index ca6bf9d681..07e5cf1aa2 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
-pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
- if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
- return macho_file.getAtomForSymbol(self.target);
+ if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
+ return macho_file.getAtomIndexForSymbol(self.target);
}
-pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
+pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
+ const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom = self.getTargetAtom(macho_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
+ const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index 0e3760526c..a452551a0a 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -12,7 +12,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld";
fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 {
const darwin_path_max = 1024;
- const name_len = if (assume_max_path_len) darwin_path_max else std.mem.len(name) + 1;
+ const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1;
return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64));
}
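Since `name` is now a slice, `name.len + 1` (the string plus its NUL byte) replaces the `std.mem.len` call on a pointer. A small worked example of the rounding this function performs, using a hypothetical command header size:

const std = @import("std");

test "install name length is padded to u64 alignment" {
    const cmd_size: u64 = 24; // hypothetical fixed load-command header size
    const name = "libfoo.dylib"; // 12 bytes + 1 NUL byte
    const total = std.mem.alignForwardGeneric(u64, cmd_size + name.len + 1, @alignOf(u64));
    try std.testing.expectEqual(@as(u64, 40), total); // 37 rounded up to 40
}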
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 4cb346aa47..81fae399ef 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -3596,7 +3596,8 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
man.hash.addOptionalBytes(options.sysroot);
try man.addOptionalFile(options.entitlements);
- // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ // We don't actually care whether it's a cache hit or miss; we just
+ // need the digest and the lock.
_ = try man.hit();
digest = man.final();
@@ -4177,9 +4178,11 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
- man.writeManifest() catch |err| {
- log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
- };
+ if (man.have_exclusive_lock) {
+ man.writeManifest() catch |err| {
+ log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
+ };
+ }
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
macho_file.base.lock = man.toOwnedLock();
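The new guard reflects the expected manifest locking discipline: only a process still holding the exclusive lock may rewrite the manifest, while a process that merely observed a hit holds a shared lock. A toy model of that invariant (assuming these Cache.Manifest semantics; not the real API):

const Manifest = struct {
    have_exclusive_lock: bool,

    fn writeManifest(self: *Manifest) !void {
        // writing requires the exclusive lock; doing so under a shared
        // lock would race with other linker processes
        if (!self.have_exclusive_lock) return error.NotLocked;
    }
};

fn finish(man: *Manifest) void {
    if (man.have_exclusive_lock) {
        man.writeManifest() catch {};
    }
}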
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 5423269bf0..87e3ca5c22 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
-const FnDeclOutput = struct {
- /// this code is modified when relocated so it is mutable
- code: []u8,
- /// this might have to be modified in the linker, so thats why its mutable
- lineinfo: []u8,
- start_line: u32,
- end_line: u32,
-};
+pub const base_tag = .plan9;
base: link.File,
sixtyfour_bit: bool,
@@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
+decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
const Reloc = struct {
target: Module.Decl.Index,
offset: u64,
@@ -115,6 +111,42 @@ const Bases = struct {
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
+pub const PtrWidth = enum { p32, p64 };
+
+pub const DeclBlock = struct {
+ type: aout.Sym.Type,
+    /// offset in the text or data sections
+ offset: ?u64,
+ /// offset into syms
+ sym_index: ?usize,
+ /// offset into got
+ got_index: ?usize,
+
+ pub const Index = u32;
+};
+
+const DeclMetadata = struct {
+ index: DeclBlock.Index,
+ exports: std.ArrayListUnmanaged(usize) = .{},
+
+ fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
+ for (m.exports.items) |exp| {
+ const sym = p9.syms.items[exp];
+ if (mem.eql(u8, name, sym.name)) return exp;
+ }
+ return null;
+ }
+};
+
+const FnDeclOutput = struct {
+    /// this code is modified when relocated, so it is mutable
+    code: []u8,
+    /// this might have to be modified in the linker, so that's why it's mutable
+ lineinfo: []u8,
+ start_line: u32,
+ end_line: u32,
+};
+
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
return addr + switch (t) {
.T, .t, .l, .L => self.bases.text,
@@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
}
-pub const DeclBlock = struct {
- type: aout.Sym.Type,
- /// offset in the text or data sects
- offset: ?u64,
- /// offset into syms
- sym_index: ?usize,
- /// offset into got
- got_index: ?usize,
- pub const empty = DeclBlock{
- .type = .t,
- .offset = null,
- .sym_index = null,
- .got_index = null,
- };
-};
-
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
return switch (arch) {
.x86_64 => .{
@@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
};
}
-pub const PtrWidth = enum { p32, p64 };
-
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@@ -299,7 +313,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
},
);
const code = switch (res) {
- .appended => try code_buffer.toOwnedSlice(),
+ .ok => try code_buffer.toOwnedSlice(),
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.end_line = end_line,
};
try self.putFn(decl_index, out);
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -358,8 +372,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
.parent_atom_index = @enumToInt(decl_index),
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
@@ -388,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
}
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
@@ -403,8 +416,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
.parent_atom_index = @enumToInt(decl_index),
});
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
+ .ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@@ -416,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
self.base.allocator.free(old_entry.value);
}
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
/// called at the end of update{Decl,Func}
-fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
+fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
+
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
// write the internal linker metadata
- decl.link.plan9.type = sym_t;
+ decl_block.type = sym_t;
// write the symbol
- // we already have the got index because that got allocated in allocateDeclIndexes
+    // we already have the got index; it was allocated in seeDecl
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
- .type = decl.link.plan9.type,
+ .type = decl_block.type,
.name = mem.span(decl.name),
};
- if (decl.link.plan9.sym_index) |s| {
+ if (decl_block.sym_index) |s| {
self.syms.items[s] = sym;
} else {
const s = try self.allocateSymbolIndex();
- decl.link.plan9.sym_index = s;
+ decl_block.sym_index = s;
self.syms.items[s] = sym;
}
}
@@ -552,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@@ -570,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off));
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
}
@@ -600,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@@ -608,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(data_i, .d);
data_i += code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
// write the unnamed constants after the other data decls
@@ -678,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
- const target_decl_offset = target_decl.link.plan9.offset.?;
+ const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
+ const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
@@ -711,35 +729,43 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
fn addDeclExports(
self: *Plan9,
module: *Module,
- decl: *Module.Decl,
+ decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const metadata = self.decls.getPtr(decl_index).?;
+ const decl_block = self.getDeclBlock(metadata.index);
+
for (exports) |exp| {
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
- try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
+ try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
+ self.base.allocator,
+ module.declPtr(decl_index).srcLoc(),
+ "plan9 does not support extra sections",
+ .{},
+ ));
break;
}
}
const sym = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
+ .value = decl_block.offset.?,
+ .type = decl_block.type.toGlobal(),
.name = exp.options.name,
};
- if (exp.link.plan9) |i| {
+ if (metadata.getExport(self, exp.options.name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
- exp.link.plan9 = self.syms.items.len - 1;
+ try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
}
}
}
pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// TODO audit the lifetimes of decls table entries. It's possible to get
- // allocateDeclIndexes and then freeDecl without any updateDecl in between.
+ // freeDecl without any updateDecl in between.
// However that is planned to change, see the TODO comment in Module.zig
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
@@ -762,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
self.base.allocator.free(removed_entry.value);
}
}
- if (decl.link.plan9.got_index) |i| {
- // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
- self.got_index_free_list.append(self.base.allocator, i) catch {};
- }
- if (decl.link.plan9.sym_index) |i| {
- self.syms_index_free_list.append(self.base.allocator, i) catch {};
- self.syms.items[i] = aout.Sym.undefined_symbol;
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ const decl_block = self.getDeclBlock(kv.value.index);
+ if (decl_block.got_index) |i| {
+ // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
+ self.got_index_free_list.append(self.base.allocator, i) catch {};
+ }
+ if (decl_block.sym_index) |i| {
+ self.syms_index_free_list.append(self.base.allocator, i) catch {};
+ self.syms.items[i] = aout.Sym.undefined_symbol;
+ }
+ kv.value.exports.deinit(self.base.allocator);
}
self.freeUnnamedConsts(decl_index);
{
@@ -788,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
-pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- if (decl.link.plan9.got_index == null) {
- decl.link.plan9.got_index = self.allocateGotIndex();
+fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
+ const gpa = self.base.allocator;
+ const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
+ const decl_block = try self.decl_blocks.addOne(gpa);
+ decl_block.* = .{
+ .type = .t,
+ .offset = null,
+ .sym_index = null,
+ .got_index = null,
+ };
+ return index;
+}
+
+pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ const index = try self.createDeclBlock();
+ self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
+ gop.value_ptr.* = .{
+ .index = index,
+ .exports = .{},
+ };
}
+ return gop.value_ptr.index;
}
pub fn updateDeclExports(
@@ -802,7 +851,7 @@ pub fn updateDeclExports(
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
// we do all the things in flush
_ = module;
_ = exports;
@@ -844,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
self.syms_index_free_list.deinit(gpa);
self.file_segments.deinit(gpa);
self.path_arena.deinit();
+ self.decl_blocks.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
}
-pub const Export = ?usize;
-pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -913,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
- const mod = self.base.options.module.?;
-
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
- }
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ try self.writeSym(writer, self.syms.items[exp_i]);
+ };
}
}
}
@@ -945,32 +1000,28 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- const s = self.syms.items[e.link.plan9.?];
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
try self.writeSym(writer, s);
- }
+ };
}
}
}
}
}
-/// this will be removed, moved to updateFinish
-pub fn allocateDeclIndexes(self: *Plan9, decl_index: Module.Decl.Index) !void {
- _ = self;
- _ = decl_index;
-}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = mod;
- _ = decl;
+ _ = decl_index;
}
pub fn getDeclVAddr(
@@ -1011,3 +1062,11 @@ pub fn getDeclVAddr(
});
return undefined;
}
+
+pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
+ return self.decl_blocks.items[index];
+}
+
+fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
+ return &self.decl_blocks.items[index];
+}
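The `DeclMetadata.getExport` lookup replaces the removed `exp.link.plan9` back-pointer: instead of each `Module.Export` remembering its symbol slot, the decl's metadata records all of its export symbol indices and resolves them by name. The same lookup in isolation, with stand-in types:

const std = @import("std");

const Sym = struct { name: []const u8, value: u64 };

fn getExport(syms: []const Sym, export_indices: []const usize, name: []const u8) ?usize {
    // linear scan; a decl rarely has more than a handful of exports
    for (export_indices) |exp| {
        if (std.mem.eql(u8, name, syms[exp].name)) return exp;
    }
    return null;
}

test "export lookup by name" {
    const syms = [_]Sym{ .{ .name = "_start", .value = 0 }, .{ .name = "main", .value = 8 } };
    const exports = [_]usize{ 0, 1 };
    try std.testing.expectEqual(@as(?usize, 1), getExport(&syms, &exports, "main"));
}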
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7dbd3a42ce..14a29e4498 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
-// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
-pub const FnData = struct {
- // We're going to fill these in flushModule, and we're going to fill them unconditionally,
- // so just set it to undefined.
- id: IdResult = undefined,
-};
-
base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
@@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
- // TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
+ var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
+ defer ids.deinit();
+ try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));
+
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
- decl.fn_link.spirv.id = spv.allocId();
+ ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}
// Now, actually generate the code for all declarations.
- var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
+ var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();
var it = self.decl_table.iterator();
@@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;
// Note, if `decl` is not a function, air/liveness may be undefined.
- if (try decl_gen.gen(decl, air, liveness)) |msg| {
+ if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
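The per-decl `FnData.id` field is gone; a temporary map now assigns every declaration its result id up front, so decls can reference each other before their bodies are generated. A reduced sketch of that pre-pass, with the index and id types simplified to integers:

const std = @import("std");

const DeclIndex = u32;
const IdResult = u32;

fn assignIds(gpa: std.mem.Allocator, decls: []const DeclIndex, next_id: *IdResult) !std.AutoHashMap(DeclIndex, IdResult) {
    var ids = std.AutoHashMap(DeclIndex, IdResult).init(gpa);
    errdefer ids.deinit();
    try ids.ensureTotalCapacity(@intCast(u32, decls.len));
    for (decls) |decl_index| {
        // allocate one result id per decl before any code generation
        ids.putAssumeCapacityNoClobber(decl_index, next_id.*);
        next_id.* += 1;
    }
    return ids;
}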
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 7154cd7bc1..17391b017a 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -9,7 +9,7 @@ const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.link);
-const Atom = @import("Wasm/Atom.zig");
+pub const Atom = @import("Wasm/Atom.zig");
const Dwarf = @import("Dwarf.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@@ -31,10 +31,7 @@ const Object = @import("Wasm/Object.zig");
const Archive = @import("Wasm/Archive.zig");
const types = @import("Wasm/types.zig");
-pub const base_tag = link.File.Tag.wasm;
-
-/// deprecated: Use `@import("Wasm/Atom.zig");`
-pub const DeclBlock = Atom;
+pub const base_tag: link.File.Tag = .wasm;
base: link.File,
/// Output name of the file
@@ -47,18 +44,16 @@ llvm_object: ?*LlvmObject = null,
/// TODO: Allow setting this through a flag?
host_name: []const u8 = "env",
/// List of all `Decl` that are currently alive.
-/// This is ment for bookkeeping so we can safely cleanup all codegen memory
-/// when calling `deinit`
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{},
+/// Each index maps to the corresponding `Atom.Index`.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index) = .{},
/// List of all symbols generated by Zig code.
symbols: std.ArrayListUnmanaged(Symbol) = .{},
/// List of symbol indexes which are free to be used.
symbols_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Maps atoms to their segment index
-atoms: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
-/// Atoms managed and created by the linker. This contains atoms
-/// from object files, and not Atoms generated by a Decl.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
+/// List of all atoms.
+managed_atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Represents the index into `segments` where the 'code' section
/// lives.
code_section_index: ?u32 = null,
@@ -148,7 +143,7 @@ undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{},
/// Maps a symbol's location to an atom. This can be used to find meta
/// data of a symbol, such as its size, or its offset to perform a relocation.
/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped.
-symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, *Atom) = .{},
+symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .{},
/// Maps a symbol's location to its export name, which may differ from the decl's name
/// which does the exporting.
/// Note: The value represents the offset into the string table, rather than the actual string.
@@ -165,14 +160,14 @@ error_table_symbol: ?u32 = null,
// unit contains Zig code. The lifetime of these atoms are extended
// until the end of the compiler's lifetime. Meaning they're not freed
// during `flush()` in incremental-mode.
-debug_info_atom: ?*Atom = null,
-debug_line_atom: ?*Atom = null,
-debug_loc_atom: ?*Atom = null,
-debug_ranges_atom: ?*Atom = null,
-debug_abbrev_atom: ?*Atom = null,
-debug_str_atom: ?*Atom = null,
-debug_pubnames_atom: ?*Atom = null,
-debug_pubtypes_atom: ?*Atom = null,
+debug_info_atom: ?Atom.Index = null,
+debug_line_atom: ?Atom.Index = null,
+debug_loc_atom: ?Atom.Index = null,
+debug_ranges_atom: ?Atom.Index = null,
+debug_abbrev_atom: ?Atom.Index = null,
+debug_str_atom: ?Atom.Index = null,
+debug_pubnames_atom: ?Atom.Index = null,
+debug_pubtypes_atom: ?Atom.Index = null,
pub const Segment = struct {
alignment: u32,
@@ -183,13 +178,9 @@ pub const Segment = struct {
pub const FnData = struct {
/// Reference to the wasm type that represents this function.
type_index: u32,
- /// Contains debug information related to this function.
- /// For Wasm, the offset is relative to the code-section.
- src_fn: Dwarf.SrcFn,
pub const empty: FnData = .{
.type_index = undefined,
- .src_fn = Dwarf.SrcFn.empty,
};
};
@@ -434,10 +425,10 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// at the end during `initializeCallCtorsFunction`.
}
- if (!options.strip and options.module != null) {
- wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
- try wasm_bin.initDebugSections();
- }
+ // if (!options.strip and options.module != null) {
+ // wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
+ // try wasm_bin.initDebugSections();
+ // }
return wasm_bin;
}
@@ -478,6 +469,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
try wasm.globals.put(wasm.base.allocator, name_offset, loc);
return loc;
}
+
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
@@ -520,6 +512,36 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
return true;
}
+/// For a given `Module.Decl.Index`, returns its corresponding `Atom.Index`.
+/// If no atom exists yet, a new `Atom` is created and its index returned.
+/// The newly created Atom is empty with default fields as specified by `Atom.empty`.
+pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try wasm.createAtom();
+ }
+ return gop.value_ptr.*;
+}
+
+/// Creates a new empty `Atom` and returns its `Atom.Index`
+fn createAtom(wasm: *Wasm) !Atom.Index {
+ const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+ atom.* = Atom.empty;
+ atom.sym_index = try wasm.allocateSymbol();
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, .{ .file = null, .index = atom.sym_index }, index);
+
+ return index;
+}
+
+pub inline fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom {
+ return wasm.managed_atoms.items[index];
+}
+
+pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
+ return &wasm.managed_atoms.items[index];
+}
+
/// Parses an archive file and will then parse each object file
/// that was found in the archive file.
/// Returns false when the file is not an archive file.
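A standalone sketch of the `getOrCreateAtomForDecl` memoization above, with the atom store reduced to a counter: the map guarantees exactly one atom per decl, created lazily on first use.

const std = @import("std");

const DeclIndex = u32;
const AtomIndex = u32;

fn getOrCreateAtom(
    gpa: std.mem.Allocator,
    decls: *std.AutoHashMapUnmanaged(DeclIndex, AtomIndex),
    next_atom: *AtomIndex,
    decl_index: DeclIndex,
) !AtomIndex {
    const gop = try decls.getOrPut(gpa, decl_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = next_atom.*;
        next_atom.* += 1;
    }
    return gop.value_ptr.*;
}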
@@ -861,15 +883,16 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ // TODO: Can we use `createAtom` here while also re-using the symbol
+ // from `createSyntheticSymbol`.
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
@@ -877,15 +900,14 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
}
@@ -924,16 +946,6 @@ pub fn deinit(wasm: *Wasm) void {
if (wasm.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- decl.link.wasm.deinit(gpa);
- }
- } else {
- assert(wasm.decls.count() == 0);
- }
-
for (wasm.func_types.items) |*func_type| {
func_type.deinit(gpa);
}
@@ -958,9 +970,8 @@ pub fn deinit(wasm: *Wasm) void {
wasm.symbol_atom.deinit(gpa);
wasm.export_names.deinit(gpa);
wasm.atoms.deinit(gpa);
- for (wasm.managed_atoms.items) |managed_atom| {
- managed_atom.deinit(gpa);
- gpa.destroy(managed_atom);
+ for (wasm.managed_atoms.items) |*managed_atom| {
+ managed_atom.deinit(wasm);
}
wasm.managed_atoms.deinit(gpa);
wasm.segments.deinit(gpa);
@@ -986,31 +997,23 @@ pub fn deinit(wasm: *Wasm) void {
}
}
-pub fn allocateDeclIndexes(wasm: *Wasm, decl_index: Module.Decl.Index) !void {
- if (wasm.llvm_object) |_| return;
- const decl = wasm.base.options.module.?.declPtr(decl_index);
- if (decl.link.wasm.sym_index != 0) return;
-
+/// Allocates a new symbol and returns its index.
+/// Will re-use slots when a symbol was freed at an earlier stage.
+pub fn allocateSymbol(wasm: *Wasm) !u32 {
try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
- try wasm.decls.putNoClobber(wasm.base.allocator, decl_index, {});
-
- const atom = &decl.link.wasm;
-
var symbol: Symbol = .{
.name = undefined, // will be set after updateDecl
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
.tag = undefined, // will be set after updateDecl
.index = undefined, // will be set after updateDecl
};
-
if (wasm.symbols_free_list.popOrNull()) |index| {
- atom.sym_index = index;
wasm.symbols.items[index] = symbol;
- } else {
- atom.sym_index = @intCast(u32, wasm.symbols.items.len);
- wasm.symbols.appendAssumeCapacity(symbol);
+ return index;
}
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom);
+ const index = @intCast(u32, wasm.symbols.items.len);
+ wasm.symbols.appendAssumeCapacity(symbol);
+ return index;
}
pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
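`allocateSymbol` is the decl-agnostic remainder of the removed `allocateDeclIndexes`: reserve capacity first so the append cannot fail, then prefer a recycled slot from the free list. The same pattern with the Symbol payload stripped to a plain integer:

const std = @import("std");

fn allocateSlot(gpa: std.mem.Allocator, slots: *std.ArrayListUnmanaged(u32), free_list: *std.ArrayListUnmanaged(u32)) !u32 {
    try slots.ensureUnusedCapacity(gpa, 1);
    // re-use a previously freed slot when one is available
    if (free_list.popOrNull()) |index| {
        slots.items[index] = 0; // overwrite the recycled slot
        return index;
    }
    const index = @intCast(u32, slots.items.len);
    slots.appendAssumeCapacity(0);
    return index;
}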
@@ -1026,15 +1029,24 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
- decl.link.wasm.clear();
-
- var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
+ // const result = try codegen.generateFunction(
+ // &wasm.base,
+ // decl.srcLoc(),
+ // func,
+ // air,
+ // liveness,
+ // &code_writer,
+ // if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ // );
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(),
@@ -1042,11 +1054,11 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
air,
liveness,
&code_writer,
- if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ .none,
);
const code = switch (result) {
- .appended => code_writer.items,
+ .ok => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
@@ -1054,19 +1066,19 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
},
};
- if (wasm.dwarf) |*dwarf| {
- try dwarf.commitDeclState(
- mod,
- decl_index,
- // Actual value will be written after relocation.
- // For Wasm, this is the offset relative to the code section
- // which isn't known until flush().
- 0,
- code.len,
- &decl_state.?,
- );
- }
- return wasm.finishUpdateDecl(decl, code);
+ // if (wasm.dwarf) |*dwarf| {
+ // try dwarf.commitDeclState(
+ // mod,
+ // decl_index,
+ // // Actual value will be written after relocation.
+ // // For Wasm, this is the offset relative to the code section
+ // // which isn't known until flush().
+ // 0,
+ // code.len,
+ // &decl_state.?,
+ // );
+ // }
+ return wasm.finishUpdateDecl(decl_index, code);
}
// Generate code for the Decl, storing it in memory to be later written to
@@ -1083,20 +1095,20 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
defer tracy.end();
const decl = mod.declPtr(decl_index);
- assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
-
- decl.link.wasm.clear();
-
if (decl.val.castTag(.function)) |_| {
return;
} else if (decl.val.castTag(.extern_fn)) |_| {
return;
}
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
+
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
- return wasm.addOrUpdateImport(name, decl.link.wasm.sym_index, variable.lib_name, null);
+ return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
}
const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
@@ -1109,12 +1121,11 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
.{ .ty = decl.ty, .val = val },
&code_writer,
.none,
- .{ .parent_atom_index = decl.link.wasm.sym_index },
+ .{ .parent_atom_index = atom.sym_index },
);
const code = switch (res) {
- .externally_managed => |x| x,
- .appended => code_writer.items,
+ .ok => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em);
@@ -1122,26 +1133,29 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
},
};
- return wasm.finishUpdateDecl(decl, code);
+ return wasm.finishUpdateDecl(decl_index, code);
}
-pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
if (wasm.llvm_object) |_| return;
if (wasm.dwarf) |*dw| {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
+fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8) !void {
const mod = wasm.base.options.module.?;
- const atom: *Atom = &decl.link.wasm;
+ const decl = mod.declPtr(decl_index);
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = &wasm.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(full_name);
@@ -1149,8 +1163,8 @@ fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
try atom.code.appendSlice(wasm.base.allocator, code);
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- if (code.len == 0) return;
atom.size = @intCast(u32, code.len);
+ if (code.len == 0) return;
atom.alignment = decl.ty.abiAlignment(wasm.base.options.target);
}
@@ -1207,58 +1221,51 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
- const local_index = decl.link.wasm.locals.items.len;
+ const atom_index = try wasm.createAtom();
+ const parent_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const parent_atom = wasm.getAtomPtr(parent_atom_index);
+ const local_index = parent_atom.locals.items.len;
+ try parent_atom.locals.append(wasm.base.allocator, atom_index);
const fqdn = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer wasm.base.allocator.free(name);
- var symbol: Symbol = .{
- .name = try wasm.string_table.put(wasm.base.allocator, name),
- .flags = 0,
- .tag = .data,
- .index = undefined,
- };
- symbol.setFlag(.WASM_SYM_BINDING_LOCAL);
-
- const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
- try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
-
- if (wasm.symbols_free_list.popOrNull()) |index| {
- atom.sym_index = index;
- wasm.symbols.items[index] = symbol;
- } else {
- atom.sym_index = @intCast(u32, wasm.symbols.items.len);
- wasm.symbols.appendAssumeCapacity(symbol);
- }
- try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom);
-
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
- const result = try codegen.generateSymbol(
- &wasm.base,
- decl.srcLoc(),
- tv,
- &value_bytes,
- .none,
- .{
- .parent_atom_index = atom.sym_index,
- .addend = null,
- },
- );
- const code = switch (result) {
- .externally_managed => |x| x,
- .appended => value_bytes.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
- },
+ const code = code: {
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
+ wasm.symbols.items[atom.sym_index] = .{
+ .name = try wasm.string_table.put(wasm.base.allocator, name),
+ .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
+ .tag = .data,
+ .index = undefined,
+ };
+ try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
+
+ const result = try codegen.generateSymbol(
+ &wasm.base,
+ decl.srcLoc(),
+ tv,
+ &value_bytes,
+ .none,
+ .{
+ .parent_atom_index = atom.sym_index,
+ .addend = null,
+ },
+ );
+ break :code switch (result) {
+ .ok => value_bytes.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try mod.failed_decls.put(mod.gpa, decl_index, em);
+ return error.AnalysisFail;
+ },
+ };
};
+ const atom = wasm.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
try atom.code.appendSlice(wasm.base.allocator, code);
return atom.sym_index;
@@ -1306,10 +1313,13 @@ pub fn getDeclVAddr(
) !u64 {
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const target_symbol_index = decl.link.wasm.sym_index;
- assert(target_symbol_index != 0);
+
+ const target_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const target_symbol_index = wasm.getAtom(target_atom_index).sym_index;
+
assert(reloc_info.parent_atom_index != 0);
- const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag() == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
@@ -1337,9 +1347,10 @@ pub fn getDeclVAddr(
return target_symbol_index;
}
-pub fn deleteExport(wasm: *Wasm, exp: Export) void {
+pub fn deleteDeclExport(wasm: *Wasm, decl_index: Module.Decl.Index) void {
if (wasm.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const atom_index = wasm.decls.get(decl_index) orelse return;
+ const sym_index = wasm.getAtom(atom_index).sym_index;
const loc: SymbolLoc = .{ .file = null, .index = sym_index };
const symbol = loc.getSymbol(wasm);
const symbol_name = wasm.string_table.get(symbol.name);
@@ -1365,6 +1376,8 @@ pub fn updateDeclExports(
}
const decl = mod.declPtr(decl_index);
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtom(atom_index);
for (exports) |exp| {
if (exp.options.section) |section| {
@@ -1379,7 +1392,7 @@ pub fn updateDeclExports(
const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name);
if (wasm.globals.getPtr(export_name)) |existing_loc| {
- if (existing_loc.index == decl.link.wasm.sym_index) continue;
+ if (existing_loc.index == atom.sym_index) continue;
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
@@ -1400,15 +1413,16 @@ pub fn updateDeclExports(
} else if (exp_is_weak) {
continue; // to-be-exported symbol is weak, so we keep the existing symbol
} else {
- existing_loc.index = decl.link.wasm.sym_index;
+ // TODO: Revisit this, why was this needed?
+ existing_loc.index = atom.sym_index;
existing_loc.file = null;
- exp.link.wasm.sym_index = existing_loc.index;
+ // exp.link.wasm.sym_index = existing_loc.index;
}
}
- const exported_decl = mod.declPtr(exp.exported_decl);
- const sym_index = exported_decl.link.wasm.sym_index;
- const sym_loc = exported_decl.link.wasm.symbolLoc();
+ const exported_atom_index = try wasm.getOrCreateAtomForDecl(exp.exported_decl);
+ const exported_atom = wasm.getAtom(exported_atom_index);
+ const sym_loc = exported_atom.symbolLoc();
const symbol = sym_loc.getSymbol(wasm);
switch (exp.options.linkage) {
.Internal => {
@@ -1444,7 +1458,6 @@ pub fn updateDeclExports(
// if the symbol was previously undefined, remove it as an import
_ = wasm.imports.remove(sym_loc);
_ = wasm.undefs.swapRemove(exp.options.name);
- exp.link.wasm.sym_index = sym_index;
}
}
@@ -1454,11 +1467,13 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
}
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
wasm.symbols_free_list.append(wasm.base.allocator, atom.sym_index) catch {};
_ = wasm.decls.remove(decl_index);
wasm.symbols.items[atom.sym_index].tag = .dead;
- for (atom.locals.items) |local_atom| {
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtom(local_atom_index);
const local_symbol = &wasm.symbols.items[local_atom.sym_index];
local_symbol.tag = .dead; // also for any local symbol
wasm.symbols_free_list.append(wasm.base.allocator, local_atom.sym_index) catch {};
@@ -1472,12 +1487,20 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
_ = wasm.symbol_atom.remove(atom.symbolLoc());
- if (wasm.dwarf) |*dwarf| {
- dwarf.freeDecl(decl);
- dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // dwarf.freeDecl(decl_index);
+ // }
- atom.deinit(wasm.base.allocator);
+ if (atom.next) |next_atom_index| {
+ const next_atom = wasm.getAtomPtr(next_atom_index);
+ next_atom.prev = atom.prev;
+ atom.next = null;
+ }
+ if (atom.prev) |prev_index| {
+ const prev_atom = wasm.getAtomPtr(prev_index);
+ prev_atom.next = atom.next;
+ atom.prev = null;
+ }
}
/// Appends a new entry to the indirect function table
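The unlink in `freeDecl` above reaches each neighbour through `getAtomPtr` rather than through stored pointers. The same operation over a flat slice of atoms, as a sketch:

const Atom = struct {
    prev: ?Index = null,
    next: ?Index = null,
    pub const Index = u32;
};

fn unlink(atoms: []Atom, index: Atom.Index) void {
    const atom = &atoms[index];
    if (atom.next) |n| atoms[n].prev = atom.prev;
    if (atom.prev) |p| atoms[p].next = atom.next;
    atom.next = null;
    atom.prev = null;
}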
@@ -1599,7 +1622,8 @@ const Kind = union(enum) {
};
/// Parses an Atom and inserts its metadata into the corresponding sections.
-fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
+fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
const final_index: u32 = switch (kind) {
.function => |fn_data| result: {
@@ -1674,18 +1698,20 @@ fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
const segment: *Segment = &wasm.segments.items[final_index];
segment.alignment = std.math.max(segment.alignment, atom.alignment);
- try wasm.appendAtomAtIndex(final_index, atom);
+ try wasm.appendAtomAtIndex(final_index, atom_index);
}
/// From a given index, append the given `Atom` at the back of the linked list.
/// Simply inserts it into the map of atoms when it doesn't exist yet.
-pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom: *Atom) !void {
- if (wasm.atoms.getPtr(index)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
+pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void {
+ const atom = wasm.getAtomPtr(atom_index);
+ if (wasm.atoms.getPtr(index)) |last_index_ptr| {
+ const last = wasm.getAtomPtr(last_index_ptr.*);
+ last.*.next = atom_index;
+ atom.prev = last_index_ptr.*;
+ last_index_ptr.* = atom_index;
} else {
- try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom);
+ try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom_index);
}
}
@@ -1695,16 +1721,17 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
if (wasm.dwarf == null) return;
const allocAtom = struct {
- fn f(bin: *Wasm, maybe_index: *?u32, atom: *Atom) !void {
+ fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
const index = maybe_index.* orelse idx: {
const index = @intCast(u32, bin.segments.items.len);
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
+ const atom = bin.getAtomPtr(atom_index);
atom.size = @intCast(u32, atom.code.items.len);
bin.symbols.items[atom.sym_index].index = index;
- try bin.appendAtomAtIndex(index, atom);
+ try bin.appendAtomAtIndex(index, atom_index);
}
}.f;
@@ -1726,15 +1753,16 @@ fn allocateAtoms(wasm: *Wasm) !void {
var it = wasm.atoms.iterator();
while (it.next()) |entry| {
const segment = &wasm.segments.items[entry.key_ptr.*];
- var atom: *Atom = entry.value_ptr.*.getFirst();
+ var atom_index = entry.value_ptr.*;
var offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol_loc = atom.symbolLoc();
if (wasm.code_section_index) |index| {
if (index == entry.key_ptr.*) {
if (!wasm.resolved_symbols.contains(symbol_loc)) {
                        // only allocate resolved function bodies.
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
continue;
}
}
@@ -1748,8 +1776,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
atom.size,
});
offset += atom.size;
- try wasm.symbol_atom.put(wasm.base.allocator, symbol_loc, atom); // Update atom pointers
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
}
@@ -1883,8 +1910,8 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
symbol.index = func_index;
// create the atom that will be output into the final binary
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
.size = @intCast(u32, function_body.items.len),
.offset = 0,
@@ -1894,15 +1921,14 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
- .dbg_info_atom = undefined,
};
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom);
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom_index);
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
// `allocateAtoms` has already been called, set the atom's offset manually.
// This is fine to do manually as we insert the atom at the very end.
- atom.offset = atom.prev.?.offset + atom.prev.?.size;
+ const prev_atom = wasm.getAtom(atom.prev.?);
+ atom.offset = prev_atom.offset + prev_atom.size;
}
fn setupImports(wasm: *Wasm) !void {
@@ -2105,7 +2131,8 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
const va = atom.getVA(wasm, symbol);
const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
try wasm.wasm_globals.append(wasm.base.allocator, .{
@@ -2210,7 +2237,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0; // for simplicity we store the entire VA into atom's offset.
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
}
@@ -2243,7 +2271,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0;
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, memory_ptr);
}
@@ -2369,15 +2398,14 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
// and then return said symbol's index. The final table will be populated
// during `flush` when we know all possible error names.
- // As sym_index '0' is reserved, we use it for our stack pointer symbol
- const symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
+ const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
- const symbol = &wasm.symbols.items[symbol_index];
+ const symbol = &wasm.symbols.items[sym_index];
symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2386,20 +2414,11 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
};
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = symbol_index;
- atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- const loc = atom.symbolLoc();
- try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, loc, atom);
-
- log.debug("Error name table was created with symbol index: ({d})", .{symbol_index});
- wasm.error_table_symbol = symbol_index;
- return symbol_index;
+ log.debug("Error name table was created with symbol index: ({d})", .{sym_index});
+ wasm.error_table_symbol = sym_index;
+ return sym_index;
}
/// Populates the error name table, when `error_table_symbol` is not null.
@@ -2408,22 +2427,17 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
/// The table is what is being pointed to within the runtime bodies that are generated.
fn populateErrorNameTable(wasm: *Wasm) !void {
const symbol_index = wasm.error_table_symbol orelse return;
- const atom: *Atom = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
+
// Rather than creating a symbol for each individual error name,
// we create a symbol for the entire region of error names. We then calculate
// the pointers into the list using addends which are appended to the relocation.
- const names_atom = try wasm.base.allocator.create(Atom);
- names_atom.* = Atom.empty;
- try wasm.managed_atoms.append(wasm.base.allocator, names_atom);
- const names_symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
- names_atom.sym_index = names_symbol_index;
+ const names_atom_index = try wasm.createAtom();
+ const names_atom = wasm.getAtomPtr(names_atom_index);
names_atom.alignment = 1;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
- const names_symbol = &wasm.symbols.items[names_symbol_index];
+ const names_symbol = &wasm.symbols.items[names_atom.sym_index];
names_symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2447,7 +2461,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
// create relocation to the error name
try atom.relocs.append(wasm.base.allocator, .{
- .index = names_symbol_index,
+ .index = names_atom.sym_index,
.relocation_type = .R_WASM_MEMORY_ADDR_I32,
.offset = offset,
.addend = @intCast(i32, addend),
@@ -2466,61 +2480,53 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
const name_loc = names_atom.symbolLoc();
try wasm.resolved_symbols.put(wasm.base.allocator, name_loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom);
+ try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom_index);
// link the atoms with the rest of the binary so they can be allocated
// and relocations will be performed.
- try wasm.parseAtom(atom, .{ .data = .read_only });
- try wasm.parseAtom(names_atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
+ try wasm.parseAtom(names_atom_index, .{ .data = .read_only });
}
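The comment above describes the key trick: one `__zig_err_names` symbol plus per-entry relocation addends replaces a symbol per error name. A hedged sketch of the loop implied by the surrounding hunks (`names` and the exact length handling are stand-ins; `gpa` abbreviates `wasm.base.allocator`):

// Each table entry is { ptr: u32, len: u32 }; the ptr half is patched by a
// relocation against names_atom whose addend selects the name's byte offset.
var addend: u32 = 0;
for (names) |name| {
    const offset = @intCast(u32, atom.code.items.len);
    try atom.relocs.append(gpa, .{
        .index = names_atom.sym_index,
        .relocation_type = .R_WASM_MEMORY_ADDR_I32,
        .offset = offset,
        .addend = @intCast(i32, addend),
    });
    try atom.code.appendNTimes(gpa, 0, 4); // ptr slot, filled in by the reloc
    try atom.code.writer(gpa).writeIntLittle(u32, @intCast(u32, name.len));
    try names_atom.code.writer(gpa).writeAll(name);
    try names_atom.code.append(gpa, 0); // null terminator
    addend += @intCast(u32, name.len + 1);
}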
/// From a given index variable, creates a new debug section.
/// This initializes the index, appends a new segment,
/// and finally, creates a managed `Atom`.
-pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !*Atom {
+pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
const new_index = @intCast(u32, wasm.segments.items.len);
index.* = new_index;
try wasm.appendDummySegment();
- const sym_index = wasm.symbols_free_list.popOrNull() orelse idx: {
- const tmp_index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :idx tmp_index;
- };
- wasm.symbols.items[sym_index] = .{
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ wasm.symbols.items[atom.sym_index] = .{
.tag = .section,
.name = try wasm.string_table.put(wasm.base.allocator, name),
.index = 0,
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
};
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
atom.alignment = 1; // debug sections are always 1-byte-aligned
- atom.sym_index = sym_index;
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.symbol_atom.put(wasm.base.allocator, atom.symbolLoc(), atom);
- return atom;
+ return atom_index;
}
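A hedged usage sketch of the new return type: callers receive a stable `Atom.Index` instead of a pointer, and fetch a pointer only at the point of mutation (the `debug_line_index` field and `bytes` payload are illustrative, not taken from this diff):

// Lazily create the .debug_line section the first time it is needed.
if (wasm.debug_line_index == null) {
    const atom_index = try wasm.createDebugSectionForIndex(&wasm.debug_line_index, ".debug_line");
    const atom = wasm.getAtomPtr(atom_index);
    try atom.code.appendSlice(wasm.base.allocator, bytes);
}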
fn resetState(wasm: *Wasm) void {
for (wasm.segment_info.values()) |segment_info| {
wasm.base.allocator.free(segment_info.name);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- const atom = &decl.link.wasm;
- atom.next = null;
- atom.prev = null;
-
- for (atom.locals.items) |*local_atom| {
- local_atom.next = null;
- local_atom.prev = null;
- }
+
+ var atom_it = wasm.decls.valueIterator();
+ while (atom_it.next()) |atom_index| {
+ const atom = wasm.getAtomPtr(atom_index.*);
+ atom.next = null;
+ atom.prev = null;
+
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtomPtr(local_atom_index);
+ local_atom.next = null;
+ local_atom.prev = null;
}
}
+
wasm.functions.clearRetainingCapacity();
wasm.exports.clearRetainingCapacity();
wasm.segments.clearRetainingCapacity();
@@ -2817,28 +2823,29 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.setupStart();
try wasm.setupImports();
if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
+ var decl_it = wasm.decls.iterator();
+ while (decl_it.next()) |entry| {
+ const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
- const atom = &decl.*.link.wasm;
+ const atom_index = entry.value_ptr.*;
if (decl.ty.zigTypeTag() == .Fn) {
- try wasm.parseAtom(atom, .{ .function = decl.fn_link.wasm });
+ try wasm.parseAtom(atom_index, .{ .function = decl.fn_link.? });
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
- try wasm.parseAtom(atom, .{ .data = .uninitialized });
+ try wasm.parseAtom(atom_index, .{ .data = .uninitialized });
} else {
- try wasm.parseAtom(atom, .{ .data = .initialized });
+ try wasm.parseAtom(atom_index, .{ .data = .initialized });
}
} else {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
// also parse atoms for a decl's locals
- for (atom.locals.items) |*local_atom| {
- try wasm.parseAtom(local_atom, .{ .data = .read_only });
+ const atom = wasm.getAtomPtr(atom_index);
+ for (atom.locals.items) |local_atom_index| {
+ try wasm.parseAtom(local_atom_index, .{ .data = .read_only });
}
}
@@ -3083,20 +3090,22 @@ fn writeToFile(
var code_section_size: u32 = 0;
if (wasm.code_section_index) |code_index| {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom_index = wasm.atoms.get(code_index).?;
// The code section must be sorted in line with the function order.
var sorted_atoms = try std.ArrayList(*Atom).initCapacity(wasm.base.allocator, wasm.functions.count());
defer sorted_atoms.deinit();
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
if (wasm.resolved_symbols.contains(atom.symbolLoc())) {
if (!is_obj) {
atom.resolveRelocs(wasm);
}
sorted_atoms.appendAssumeCapacity(atom);
}
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
const atom_sort_fn = struct {
@@ -3136,11 +3145,11 @@ fn writeToFile(
// do not output 'bss' section unless we import memory and therefore
// want to guarantee the data is zero initialized
if (!import_memory and std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue;
- const atom_index = entry.value_ptr.*;
- const segment = wasm.segments.items[atom_index];
+ const segment_index = entry.value_ptr.*;
+ const segment = wasm.segments.items[segment_index];
if (segment.size == 0) continue; // do not emit empty segments
segment_count += 1;
- var atom: *Atom = wasm.atoms.getPtr(atom_index).?.*.getFirst();
+ var atom_index = wasm.atoms.get(segment_index).?;
// flag and index to memory section (currently, there can only be 1 memory section in wasm)
try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3151,6 +3160,7 @@ fn writeToFile(
// fill in the offset table and the data segments
var current_offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
if (!is_obj) {
atom.resolveRelocs(wasm);
}
@@ -3166,8 +3176,8 @@ fn writeToFile(
try binary_writer.writeAll(atom.code.items);
current_offset += atom.size;
- if (atom.next) |next| {
- atom = next;
+ if (atom.prev) |prev| {
+ atom_index = prev;
} else {
// also pad with zeroes when last atom to ensure
// segments are aligned.
@@ -3209,15 +3219,15 @@ fn writeToFile(
}
if (!wasm.base.options.strip) {
- if (wasm.dwarf) |*dwarf| {
- const mod = wasm.base.options.module.?;
- try dwarf.writeDbgAbbrev();
- // for debug info and ranges, the address is always 0,
- // as locations are always offsets relative to 'code' section.
- try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
- try dwarf.writeDbgAranges(0, code_section_size);
- try dwarf.writeDbgLineHeader();
- }
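+ // NOTE: the DWARF path below appears to be disabled as part of this change;
+ // the Atom.zig hunk further down drops the per-atom `dbg_info_atom` field
+ // and the `Dwarf` import, so the old emission code has nothing to attach to.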
+ // if (wasm.dwarf) |*dwarf| {
+ // const mod = wasm.base.options.module.?;
+ // try dwarf.writeDbgAbbrev();
+ // // for debug info and ranges, the address is always 0,
+ // // as locations are always offsets relative to 'code' section.
+ // try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
+ // try dwarf.writeDbgAranges(0, code_section_size);
+ // try dwarf.writeDbgLineHeader();
+ // }
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer debug_bytes.deinit();
@@ -3240,11 +3250,11 @@ fn writeToFile(
for (debug_sections) |item| {
if (item.index) |index| {
- var atom = wasm.atoms.get(index).?.getFirst();
+ var atom = wasm.getAtomPtr(wasm.atoms.get(index).?);
while (true) {
atom.resolveRelocs(wasm);
try debug_bytes.appendSlice(atom.code.items);
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
try emitDebugSection(&binary_bytes, debug_bytes.items, item.name);
debug_bytes.clearRetainingCapacity();
@@ -3976,7 +3986,8 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
if (symbol.isDefined()) {
try leb.writeULEB128(writer, symbol.index);
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
try leb.writeULEB128(writer, @as(u32, atom.offset));
try leb.writeULEB128(writer, @as(u32, atom.size));
}
@@ -4054,7 +4065,7 @@ fn emitCodeRelocations(
const reloc_start = binary_bytes.items.len;
var count: u32 = 0;
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(code_index).?);
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
while (true) {
@@ -4072,7 +4083,7 @@ fn emitCodeRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
if (count == 0) return;
var buf: [5]u8 = undefined;
@@ -4103,7 +4114,7 @@ fn emitDataRelocations(
// for each atom, we calculate the uleb size and append that
- var size_offset: u32 = 5; // account for code section size leb128
+ var size_offset: u32 = 5; // account for data section size leb128
for (wasm.data_segments.values()) |segment_index| {
- var atom: *Atom = wasm.atoms.get(segment_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(segment_index).?);
while (true) {
size_offset += getULEB128Size(atom.size);
for (atom.relocs.items) |relocation| {
@@ -4122,7 +4133,7 @@ fn emitDataRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
}
if (count == 0) return;
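With `Atom.getFirst` removed (see the Atom.zig hunk below), every traversal in this file now follows the same pattern: `wasm.atoms` stores the most recently appended atom of a segment, and `prev` links reach all earlier ones. The recurring idiom, isolated as a sketch:

var atom_index = wasm.atoms.get(segment_index).?;
while (true) {
    const atom = wasm.getAtomPtr(atom_index);
    // ... resolve relocations, account sizes, emit code ...
    atom_index = atom.prev orelse break;
}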
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index de9cefebdc..e719f8dfcc 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const types = @import("types.zig");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
-const Dwarf = @import("../Dwarf.zig");
const leb = std.leb;
const log = std.log.scoped(.link);
@@ -30,17 +29,17 @@ file: ?u16,
-/// Next atom in relation to this atom.
-/// When null, this atom is the last atom
-next: ?*Atom,
+/// Index of the next atom in relation to this one;
+/// null when this atom is the last in its segment.
+next: ?Atom.Index,
-/// Previous atom in relation to this atom.
-/// is null when this atom is the first in its order
-prev: ?*Atom,
+/// Index of the previous atom in relation to this one;
+/// null when this atom is the first in its segment.
+prev: ?Atom.Index,
-/// Contains atoms local to a decl, all managed by this `Atom`.
-/// When the parent atom is being freed, it will also do so for all local atoms.
-locals: std.ArrayListUnmanaged(Atom) = .{},
+/// Indices of the atoms local to a decl. The atoms themselves are owned by
+/// the linker's managed list; freeing the parent atom frees only these indices.
+locals: std.ArrayListUnmanaged(Atom.Index) = .{},
-/// Represents the debug Atom that holds all debug information of this Atom.
-dbg_info_atom: Dwarf.Atom,
+/// A stable handle for an atom: an index into the linker's list of managed
+/// atoms, which (unlike a pointer) survives reallocation of that list.
+pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
@@ -51,18 +50,15 @@ pub const empty: Atom = .{
.prev = null,
.size = 0,
.sym_index = 0,
- .dbg_info_atom = undefined,
};
/// Frees all resources owned by this `Atom`.
-pub fn deinit(atom: *Atom, gpa: Allocator) void {
+pub fn deinit(atom: *Atom, wasm: *Wasm) void {
+ const gpa = wasm.base.allocator;
atom.relocs.deinit(gpa);
atom.code.deinit(gpa);
-
- for (atom.locals.items) |*local| {
- local.deinit(gpa);
- }
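+ // locals now hold Atom.Index values into the linker's managed list, so
+ // only the list of indices is freed here; the atoms themselves are not.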
atom.locals.deinit(gpa);
+ atom.* = undefined;
}
/// Sets the length of relocations and code to '0',
@@ -83,18 +79,16 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
});
}
-/// Returns the first `Atom` from a given atom
-pub fn getFirst(atom: *Atom) *Atom {
- var tmp = atom;
- while (tmp.prev) |prev| tmp = prev;
- return tmp;
-}
-
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
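+/// Returns the index of the symbol backing this atom, or null when none has
+/// been assigned (a sym_index of 0 is the reserved sentinel).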
+pub fn getSymbolIndex(atom: Atom) ?u32 {
+ if (atom.sym_index == 0) return null;
+ return atom.sym_index;
+}
+
/// Returns the virtual address of the `Atom`. This is the address starting
/// from the first entry within a section.
pub fn getVA(atom: Atom, wasm: *const Wasm, symbol: *const Symbol) u32 {
@@ -192,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
+ // this can only occur during incremental-compilation when a relocation
+ // still points to a freed decl. It is fine to emit the value 0 here
+ // as no actual code will point towards it.
+ return 0;
+ };
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
return @bitCast(u32, @as(i32, -1));
};
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 8f49d68712..7d4f6a4e36 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
- const atom = try gpa.create(Atom);
+ const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+ const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
- errdefer {
- atom.deinit(gpa);
- gpa.destroy(atom);
- }
-
- try wasm_bin.managed_atoms.append(gpa, atom);
atom.file = object_index;
atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(object);
@@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
- try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
// symbols referencing the same atom will be added as alias
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
- try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
const alias_symbol = object.symtable[idx];
if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
@@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
segment.alignment = std.math.max(segment.alignment, atom.alignment);
}
- try wasm_bin.appendAtomAtIndex(final_index, atom);
+ try wasm_bin.appendAtomAtIndex(final_index, atom_index);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
}
}
diff --git a/src/main.zig b/src/main.zig
index fdc761ac92..f634c259ff 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -893,7 +893,7 @@ fn buildOutputType(
i: usize = 0,
fn next(it: *@This()) ?[]const u8 {
if (it.i >= it.args.len) {
- if (it.resp_file) |*resp| return if (resp.next()) |sentinel| std.mem.span(sentinel) else null;
+ if (it.resp_file) |*resp| return resp.next();
return null;
}
defer it.i += 1;
@@ -901,7 +901,7 @@ fn buildOutputType(
}
fn nextOrFatal(it: *@This()) []const u8 {
if (it.i >= it.args.len) {
- if (it.resp_file) |*resp| if (resp.next()) |sentinel| return std.mem.span(sentinel);
+ if (it.resp_file) |*resp| if (resp.next()) |ret| return ret;
fatal("expected parameter after {s}", .{it.args[it.i - 1]});
}
defer it.i += 1;
@@ -3915,6 +3915,7 @@ pub const usage_build =
;
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+ var color: Color = .auto;
var prominent_compile_errors: bool = false;
// We want to release all the locks before executing the child process, so we make a nice
@@ -4117,6 +4118,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
// Here we borrow main package's table and will replace it with a fresh
// one after this process completes.
main_pkg.fetchAndAddDependencies(
+ arena,
&thread_pool,
&http_client,
build_directory,
@@ -4125,6 +4127,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
&dependencies_source,
&build_roots_source,
"",
+ color,
) catch |err| switch (err) {
error.PackageFetchFailed => process.exit(1),
else => |e| return e,
@@ -4361,12 +4364,12 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
};
defer gpa.free(source_code);
- var tree = std.zig.parse(gpa, source_code) catch |err| {
+ var tree = Ast.parse(gpa, source_code, .zig) catch |err| {
fatal("error parsing stdin: {}", .{err});
};
defer tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, tree.errors, tree, "<stdin>", color);
+ try printErrsMsgToStdErr(gpa, arena, tree, "<stdin>", color);
var has_ast_error = false;
if (check_ast_flag) {
const Module = @import("Module.zig");
@@ -4566,10 +4569,10 @@ fn fmtPathFile(
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
- var tree = try std.zig.parse(fmt.gpa, source_code);
+ var tree = try Ast.parse(fmt.gpa, source_code, .zig);
defer tree.deinit(fmt.gpa);
- try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree.errors, tree, file_path, fmt.color);
+ try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree, file_path, fmt.color);
if (tree.errors.len != 0) {
fmt.any_error = true;
return;
@@ -4649,14 +4652,14 @@ fn fmtPathFile(
}
}
-fn printErrsMsgToStdErr(
+pub fn printErrsMsgToStdErr(
gpa: mem.Allocator,
arena: mem.Allocator,
- parse_errors: []const Ast.Error,
tree: Ast,
path: []const u8,
color: Color,
) !void {
+ const parse_errors: []const Ast.Error = tree.errors;
var i: usize = 0;
while (i < parse_errors.len) : (i += 1) {
const parse_error = parse_errors[i];
@@ -4973,7 +4976,7 @@ pub const ClangArgIterator = struct {
// rather than an argument to a parameter.
// We adjust the len below when necessary.
self.other_args = (self.argv.ptr + self.next_index)[0..1];
- var arg = mem.span(self.argv[self.next_index]);
+ var arg = self.argv[self.next_index];
self.incrementArgIndex();
if (mem.startsWith(u8, arg, "@")) {
@@ -5017,7 +5020,7 @@ pub const ClangArgIterator = struct {
self.has_next = true;
self.other_args = (self.argv.ptr + self.next_index)[0..1]; // We adjust len below when necessary.
- arg = mem.span(self.argv[self.next_index]);
+ arg = self.argv[self.next_index];
self.incrementArgIndex();
}
@@ -5312,11 +5315,11 @@ pub fn cmdAstCheck(
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path);
defer file.pkg.destroy(gpa);
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, file.sub_file_path, color);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, file.sub_file_path, color);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5438,11 +5441,11 @@ pub fn cmdChangelist(
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, old_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, old_source_file, .auto);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5476,10 +5479,10 @@ pub fn cmdChangelist(
if (new_amt != new_stat.size)
return error.UnexpectedEndOfFile;
- var new_tree = try std.zig.parse(gpa, new_source);
+ var new_tree = try Ast.parse(gpa, new_source, .zig);
defer new_tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, new_tree.errors, new_tree, new_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, new_tree, new_source_file, .auto);
if (new_tree.errors.len != 0) {
process.exit(1);
}
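All of the `std.zig.parse` call sites above migrate to `Ast.parse` with an explicit mode argument. A minimal sketch of the new call shape (the `.zig` mode selects Zig syntax; presumably the argument exists to support other input kinds):

const std = @import("std");
const Ast = std.zig.Ast;

fn parseSource(gpa: std.mem.Allocator, source: [:0]const u8) !Ast {
    // Ast.parse replaces the old std.zig.parse; the trailing mode is new.
    const tree = try Ast.parse(gpa, source, .zig);
    // tree.errors carries parse diagnostics; the callers above pass the whole
    // tree to printErrsMsgToStdErr and bail out when it is non-empty.
    return tree;
}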
diff --git a/src/mingw.zig b/src/mingw.zig
index 1fee8e90a4..06880743c6 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -106,6 +106,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.msvcrt_os_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
+ "-D__LIBMSVCRT__",
"-D__LIBMSVCRT_OS__",
"-I",
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6e8923bed9..e5fc8815ed 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -332,6 +332,7 @@ const Writer = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.div_exact,
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 410e3277ee..17e8d5c82c 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4519,7 +4519,10 @@ fn transCreateNodeAssign(
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(c, "tmp");
- const rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) {
+ rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node);
+ }
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = rhs_node });
try block_scope.statements.append(tmp_decl);
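A hedged illustration of what the new branch changes: when C assigns a boolean-valued expression to a non-boolean lvalue inside a larger expression, the translated temporary now gets a `@boolToInt` wrapper (exact translate-c output formatting may differ):

// C input:
//     int x;
//     while ((x = (a == b))) { ... }
// Translated Zig, sketch of the relevant statements:
//     const tmp = @boolToInt(a == b); // previously emitted without the cast,
//     x = tmp;                        // which failed to coerce bool to c_int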
diff --git a/src/type.zig b/src/type.zig
index 2f1b1b78fb..a13e30cb4c 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2937,24 +2937,24 @@ pub const Type = extern union {
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
- .c_short => return AbiAlignmentAdvanced{ .scalar = CType.short.alignment(target) },
- .c_ushort => return AbiAlignmentAdvanced{ .scalar = CType.ushort.alignment(target) },
- .c_int => return AbiAlignmentAdvanced{ .scalar = CType.int.alignment(target) },
- .c_uint => return AbiAlignmentAdvanced{ .scalar = CType.uint.alignment(target) },
- .c_long => return AbiAlignmentAdvanced{ .scalar = CType.long.alignment(target) },
- .c_ulong => return AbiAlignmentAdvanced{ .scalar = CType.ulong.alignment(target) },
- .c_longlong => return AbiAlignmentAdvanced{ .scalar = CType.longlong.alignment(target) },
- .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = CType.ulonglong.alignment(target) },
- .c_longdouble => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
+ .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
+ .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
+ .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
+ .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
+ .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
+ .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
+ .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
+ .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
- .f32 => return AbiAlignmentAdvanced{ .scalar = CType.float.alignment(target) },
- .f64 => switch (CType.double.sizeInBits(target)) {
- 64 => return AbiAlignmentAdvanced{ .scalar = CType.double.alignment(target) },
+ .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
+ .f64 => switch (target.c_type_bit_size(.double)) {
+ 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
else => return AbiAlignmentAdvanced{ .scalar = 8 },
},
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -2964,8 +2964,8 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) };
},
},
- .f128 => switch (CType.longdouble.sizeInBits(target)) {
- 128 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f128 => switch (target.c_type_bit_size(.longdouble)) {
+ 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => return AbiAlignmentAdvanced{ .scalar = 16 },
},
@@ -3434,21 +3434,22 @@ pub const Type = extern union {
else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
},
- .c_short => return AbiSizeAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
- .c_ushort => return AbiSizeAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
- .c_int => return AbiSizeAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
- .c_uint => return AbiSizeAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
- .c_long => return AbiSizeAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
- .c_ulong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
- .c_longlong => return AbiSizeAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
- .c_ulonglong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
+ .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) },
+ .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) },
+ .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) },
+ .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) },
+ .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) },
+ .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) },
+ .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) },
+ .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) },
+ .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
.f16 => return AbiSizeAdvanced{ .scalar = 2 },
.f32 => return AbiSizeAdvanced{ .scalar = 4 },
.f64 => return AbiSizeAdvanced{ .scalar = 8 },
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiSizeAdvanced{ .scalar = std.mem.alignForward(10, CType.longdouble.alignment(target)) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -3458,14 +3459,6 @@ pub const Type = extern union {
return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
},
},
- .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
- 16 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f16, target) },
- 32 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f32, target) },
- 64 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f64, target) },
- 80 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f80, target) },
- 128 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f128, target) },
- else => unreachable,
- },
// TODO revisit this when we have the concept of the error tag type
.anyerror_void_error_union,
@@ -3748,15 +3741,15 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
=> return target.cpu.arch.ptrBitWidth(),
- .c_short => return CType.short.sizeInBits(target),
- .c_ushort => return CType.ushort.sizeInBits(target),
- .c_int => return CType.int.sizeInBits(target),
- .c_uint => return CType.uint.sizeInBits(target),
- .c_long => return CType.long.sizeInBits(target),
- .c_ulong => return CType.ulong.sizeInBits(target),
- .c_longlong => return CType.longlong.sizeInBits(target),
- .c_ulonglong => return CType.ulonglong.sizeInBits(target),
- .c_longdouble => return CType.longdouble.sizeInBits(target),
+ .c_short => return target.c_type_bit_size(.short),
+ .c_ushort => return target.c_type_bit_size(.ushort),
+ .c_int => return target.c_type_bit_size(.int),
+ .c_uint => return target.c_type_bit_size(.uint),
+ .c_long => return target.c_type_bit_size(.long),
+ .c_ulong => return target.c_type_bit_size(.ulong),
+ .c_longlong => return target.c_type_bit_size(.longlong),
+ .c_ulonglong => return target.c_type_bit_size(.ulonglong),
+ .c_longdouble => return target.c_type_bit_size(.longdouble),
.error_set,
.error_set_single,
@@ -4631,14 +4624,14 @@ pub const Type = extern union {
.i128 => return .{ .signedness = .signed, .bits = 128 },
.usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
.isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
- .c_short => return .{ .signedness = .signed, .bits = CType.short.sizeInBits(target) },
- .c_ushort => return .{ .signedness = .unsigned, .bits = CType.ushort.sizeInBits(target) },
- .c_int => return .{ .signedness = .signed, .bits = CType.int.sizeInBits(target) },
- .c_uint => return .{ .signedness = .unsigned, .bits = CType.uint.sizeInBits(target) },
- .c_long => return .{ .signedness = .signed, .bits = CType.long.sizeInBits(target) },
- .c_ulong => return .{ .signedness = .unsigned, .bits = CType.ulong.sizeInBits(target) },
- .c_longlong => return .{ .signedness = .signed, .bits = CType.longlong.sizeInBits(target) },
- .c_ulonglong => return .{ .signedness = .unsigned, .bits = CType.ulonglong.sizeInBits(target) },
+ .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
+ .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },
+ .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) },
+ .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) },
+ .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) },
+ .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
+ .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
+ .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
.enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
@@ -4724,7 +4717,7 @@ pub const Type = extern union {
.f64 => 64,
.f80 => 80,
.f128, .comptime_float => 128,
- .c_longdouble => CType.longdouble.sizeInBits(target),
+ .c_longdouble => target.c_type_bit_size(.longdouble),
else => unreachable,
};
@@ -6689,537 +6682,3 @@ pub const Type = extern union {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
};
-
-pub const CType = enum {
- short,
- ushort,
- int,
- uint,
- long,
- ulong,
- longlong,
- ulonglong,
- longdouble,
-
- // We don't have a `c_float`/`c_double` type in Zig, but these
- // are useful for querying target-correct alignment and checking
- // whether C's double is f64 or f32
- float,
- double,
-
- pub fn sizeInBits(self: CType, target: Target) u16 {
- switch (target.os.tag) {
- .freestanding, .other => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .float, .long, .ulong => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => return 128,
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .linux,
- .freebsd,
- .netbsd,
- .dragonfly,
- .openbsd,
- .wasi,
- .emscripten,
- .plan9,
- .solaris,
- .haiku,
- .ananas,
- .fuchsia,
- .minix,
- => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .netbsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .windows, .uefi => switch (target.cpu.arch) {
- .x86 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .cygnus => return 64,
- else => return 32,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
- },
-
- .macos, .ios, .tvos, .watchos => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .x86, .arm, .aarch64_32 => return 32,
- .x86_64 => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
- .x86_64 => return 80,
- else => return 64,
- },
- },
-
- .nvcl, .cuda => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .nvptx => return 32,
- .nvptx64 => return 64,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
-
- .amdhsa, .amdpal => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong, .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
-
- .cloudabi,
- .kfreebsd,
- .lv2,
- .zos,
- .rtems,
- .nacl,
- .aix,
- .ps4,
- .ps5,
- .elfiamcu,
- .mesa3d,
- .contiki,
- .hermit,
- .hurd,
- .opencl,
- .glsl450,
- .vulkan,
- .driverkit,
- .shadermodel,
- => @panic("TODO specify the C integer and float type sizes for this OS"),
- }
- }
-
- pub fn alignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .avr => switch (self) {
- .short, .ushort => return 2,
- else => return 1,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longlong, .ulonglong, .double => return 8,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => {},
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => 8,
-
- else => @as(u16, 4),
- },
- .ios, .tvos, .watchos => 4,
- else => 8,
- },
-
- .msp430,
- .avr,
- => 2,
-
- .arc,
- .csky,
- .x86,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- .xtensa,
- => 4,
-
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-
- pub fn preferredAlignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => {},
-
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- .ios, .tvos, .watchos => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- else => {},
- },
- .arc => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- .avr => switch (self) {
- .int, .uint, .long, .ulong, .float, .longdouble => return 1,
- .short, .ushort => return 2,
- .double => return 4,
- .longlong, .ulonglong => return 8,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .msp430 => @as(u16, 2),
-
- .csky,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- => 4,
-
- .arc,
- .arm,
- .armeb,
- .avr,
- .thumb,
- .thumbeb,
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .x86,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-};
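The target-dependent tables deleted above do not disappear; the call sites in the earlier hunks now query them through `std.Target`, where this logic has presumably moved. A sketch of the replacement surface exactly as this diff consumes it:

const std = @import("std");

// Sketch only: the three queries used by the abiSize/abiAlignment/intInfo
// hunks above, exercised for c_longdouble on an arbitrary target.
fn describeCLongDouble(target: std.Target) void {
    const bits = target.c_type_bit_size(.longdouble);
    const bytes = target.c_type_byte_size(.longdouble);
    const alignment = target.c_type_alignment(.longdouble);
    std.log.debug("c_longdouble: {d} bits, {d} bytes, align {d}", .{ bits, bytes, alignment });
}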