author     Andrew Kelley <andrew@ziglang.org>    2024-10-23 22:56:04 -0700
committer  GitHub <noreply@github.com>           2024-10-23 22:56:04 -0700
commit     c563ba6b15b65ecdc1cb538c9437e11dfb330453 (patch)
tree       99dd968efc3daea52a1d3628b7d8cedba53e84b7 /src
parent     33d07f4b6efe461ee3fbfa32cb18f60aac8c2827 (diff)
parent     4bdc2d38717b5655acd862a5762e069419b158c7 (diff)
download   zig-c563ba6b15b65ecdc1cb538c9437e11dfb330453.tar.gz
           zig-c563ba6b15b65ecdc1cb538c9437e11dfb330453.zip
Merge pull request #21700 from ziglang/cli-lib-dirs
move linker input file parsing to the frontend
Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig                685
-rw-r--r--  src/Sema.zig                         9
-rw-r--r--  src/ThreadSafeQueue.zig             72
-rw-r--r--  src/Zcu/PerThread.zig                6
-rw-r--r--  src/arch/riscv64/CodeGen.zig         4
-rw-r--r--  src/arch/x86_64/CodeGen.zig          4
-rw-r--r--  src/codegen.zig                      2
-rw-r--r--  src/glibc.zig                       71
-rw-r--r--  src/libcxx.zig                       8
-rw-r--r--  src/libtsan.zig                      4
-rw-r--r--  src/libunwind.zig                    4
-rw-r--r--  src/link.zig                      1044
-rw-r--r--  src/link/Coff.zig                    5
-rw-r--r--  src/link/Coff/lld.zig               59
-rw-r--r--  src/link/Elf.zig                   864
-rw-r--r--  src/link/Elf/Archive.zig            78
-rw-r--r--  src/link/Elf/Atom.zig               51
-rw-r--r--  src/link/Elf/Object.zig            321
-rw-r--r--  src/link/Elf/ZigObject.zig          30
-rw-r--r--  src/link/Elf/eh_frame.zig           37
-rw-r--r--  src/link/Elf/file.zig                4
-rw-r--r--  src/link/Elf/gc.zig                 49
-rw-r--r--  src/link/Elf/relocatable.zig        98
-rw-r--r--  src/link/LdScript.zig (renamed from src/link/Elf/LdScript.zig)  138
-rw-r--r--  src/link/MachO.zig                 193
-rw-r--r--  src/link/MachO/relocatable.zig      53
-rw-r--r--  src/link/Wasm.zig                   67
-rw-r--r--  src/main.zig                       791
-rw-r--r--  src/musl.zig                        38
-rw-r--r--  src/target.zig                      44
30 files changed, 2665 insertions, 2168 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 2776d2960a..fac1ad4baa 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -10,6 +10,7 @@ const Target = std.Target;
const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
+const Path = Cache.Path;
const Value = @import("Value.zig");
const Type = @import("Type.zig");
@@ -39,15 +40,17 @@ const Air = @import("Air.zig");
const Builtin = @import("Builtin.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
-pub const Directory = Cache.Directory;
-const Path = Cache.Path;
+const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;
+pub const Directory = Cache.Directory;
pub const Config = @import("Compilation/Config.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
/// Arena-allocated memory, mostly used during initialization. However, it can
/// be used for other things requiring the same lifetime as the `Compilation`.
+/// Not thread-safe - lock `mutex` if potentially accessing from multiple
+/// threads at once.
arena: Allocator,
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
zcu: ?*Zcu,
@@ -76,12 +79,13 @@ implib_emit: ?Path,
docs_emit: ?Path,
root_name: [:0]const u8,
include_compiler_rt: bool,
-objects: []Compilation.LinkObject,
+/// Resolved into known paths, any GNU ld scripts already resolved.
+link_inputs: []const link.Input,
/// Needed only for passing -F args to clang.
framework_dirs: []const []const u8,
-/// These are *always* dynamically linked. Static libraries will be
-/// provided as positional arguments.
-system_libs: std.StringArrayHashMapUnmanaged(SystemLib),
+/// These are only for DLL dependencies fulfilled by the `.def` files shipped
+/// with Zig. Static libraries are provided as `link.Input` values.
+windows_libs: std.StringArrayHashMapUnmanaged(void),
version: ?std.SemanticVersion,
libc_installation: ?*const LibCInstallation,
skip_linker_dependencies: bool,
@@ -107,6 +111,9 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa
} = .{},
link_diags: link.Diags,
+link_task_queue: ThreadSafeQueue(link.Task) = .empty,
+/// Ensure only 1 simultaneous call to `flushTaskQueue`.
+link_task_queue_safety: std.debug.SafetyLock = .{},
work_queues: [
len: {
@@ -118,14 +125,6 @@ work_queues: [
}
]std.fifo.LinearFifo(Job, .Dynamic),
-codegen_work: if (InternPool.single_threaded) void else struct {
- mutex: std.Thread.Mutex,
- cond: std.Thread.Condition,
- queue: std.fifo.LinearFifo(CodegenJob, .Dynamic),
- job_error: ?JobError,
- done: bool,
-},
-
/// These jobs are to invoke the Clang compiler to create an object file, which
/// gets linked with the Compilation.
c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
@@ -262,6 +261,9 @@ emit_asm: ?EmitLoc,
emit_llvm_ir: ?EmitLoc,
emit_llvm_bc: ?EmitLoc,
+link_task_wait_group: WaitGroup = .{},
+work_queue_progress_node: std.Progress.Node = .none,
+
llvm_opt_bisect_limit: c_int,
file_system_inputs: ?*std.ArrayListUnmanaged(u8),
@@ -339,16 +341,14 @@ pub const RcIncludes = enum {
};
const Job = union(enum) {
- /// Write the constant value for a Decl to the output file.
+ /// Corresponds to the task in `link.Task`.
+ /// Only needed for backends that haven't yet been updated to not race against Sema.
codegen_nav: InternPool.Nav.Index,
- /// Write the machine code for a function to the output file.
- codegen_func: struct {
- /// This will either be a non-generic `func_decl` or a `func_instance`.
- func: InternPool.Index,
- /// This `Air` is owned by the `Job` and allocated with `gpa`.
- /// It must be deinited when the job is processed.
- air: Air,
- },
+ /// Corresponds to the task in `link.Task`.
+ /// Only needed for backends that haven't yet been updated to not race against Sema.
+ codegen_func: link.Task.CodegenFunc,
+ /// Corresponds to the task in `link.Task`.
+ /// Only needed for backends that haven't yet been updated to not race against Sema.
codegen_type: InternPool.Index,
/// The `Cau` must be semantically analyzed (and possibly export itself).
/// This may be its first time being analyzed, or it may be outdated.
@@ -357,9 +357,6 @@ const Job = union(enum) {
/// After analysis, a `codegen_func` job will be queued.
/// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
analyze_func: InternPool.Index,
- /// The source file containing the Decl has been updated, and so the
- /// Decl may need its line number information updated in the debug info.
- update_line_number: void, // TODO
/// The main source file for the module needs to be analyzed.
analyze_mod: *Package.Module,
/// Fully resolve the given `struct` or `union` type.
@@ -373,6 +370,7 @@ const Job = union(enum) {
musl_crt_file: musl.CrtFile,
/// one of the mingw-w64 static objects
mingw_crt_file: mingw.CrtFile,
+
/// libunwind.a, usually needed when linking libc
libunwind: void,
libcxx: void,
@@ -384,7 +382,7 @@ const Job = union(enum) {
/// one of WASI libc static objects
wasi_libc_crt_file: wasi_libc.CrtFile,
- /// The value is the index into `system_libs`.
+ /// The value is the index into `windows_libs`.
windows_import_lib: usize,
const Tag = @typeInfo(Job).@"union".tag_type.?;
@@ -402,17 +400,6 @@ const Job = union(enum) {
}
};
-const CodegenJob = union(enum) {
- nav: InternPool.Nav.Index,
- func: struct {
- func: InternPool.Index,
- /// This `Air` is owned by the `Job` and allocated with `gpa`.
- /// It must be deinited when the job is processed.
- air: Air,
- },
- type: InternPool.Index,
-};
-
pub const CObject = struct {
/// Relative to cwd. Owned by arena.
src: CSourceFile,
@@ -999,24 +986,6 @@ const CacheUse = union(CacheMode) {
}
};
-pub const LinkObject = struct {
- path: Path,
- must_link: bool = false,
- needed: bool = false,
- // When the library is passed via a positional argument, it will be
- // added as a full path. If it's `-l<lib>`, then just the basename.
- //
- // Consistent with `withLOption` variable name in lld ELF driver.
- loption: bool = false,
-
- pub fn isObject(lo: LinkObject) bool {
- return switch (classifyFileExt(lo.path.sub_path)) {
- .object => true,
- else => false,
- };
- }
-};
-
pub const CreateOptions = struct {
zig_lib_directory: Directory,
local_cache_directory: Directory,
@@ -1061,18 +1030,20 @@ pub const CreateOptions = struct {
/// this flag would be set to disable this machinery to avoid false positives.
disable_lld_caching: bool = false,
cache_mode: CacheMode = .incremental,
- lib_dirs: []const []const u8 = &[0][]const u8{},
+ /// This field is intended to be removed.
+ /// The ELF implementation no longer uses this data, however the MachO and COFF
+ /// implementations still do.
+ lib_directories: []const Directory = &.{},
rpath_list: []const []const u8 = &[0][]const u8{},
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .empty,
c_source_files: []const CSourceFile = &.{},
rc_source_files: []const RcSourceFile = &.{},
manifest_file: ?[]const u8 = null,
rc_includes: RcIncludes = .any,
- link_objects: []LinkObject = &[0]LinkObject{},
+ link_inputs: []const link.Input = &.{},
framework_dirs: []const []const u8 = &[0][]const u8{},
frameworks: []const Framework = &.{},
- system_lib_names: []const []const u8 = &.{},
- system_lib_infos: []const SystemLib = &.{},
+ windows_lib_names: []const []const u8 = &.{},
/// These correspond to the WASI libc emulated subcomponents including:
/// * process clocks
/// * getpid
@@ -1455,12 +1426,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
errdefer if (opt_zcu) |zcu| zcu.deinit();
- var system_libs = try std.StringArrayHashMapUnmanaged(SystemLib).init(
- gpa,
- options.system_lib_names,
- options.system_lib_infos,
- );
- errdefer system_libs.deinit(gpa);
+ var windows_libs = try std.StringArrayHashMapUnmanaged(void).init(gpa, options.windows_lib_names, &.{});
+ errdefer windows_libs.deinit(gpa);
comp.* = .{
.gpa = gpa,
@@ -1479,13 +1446,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
.work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).array.len,
- .codegen_work = if (InternPool.single_threaded) {} else .{
- .mutex = .{},
- .cond = .{},
- .queue = std.fifo.LinearFifo(CodegenJob, .Dynamic).init(gpa),
- .job_error = null,
- .done = false,
- },
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.win32_resource_work_queue = if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa) else .{},
.astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa),
@@ -1522,11 +1482,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.libcxx_abi_version = options.libcxx_abi_version,
.root_name = root_name,
.sysroot = sysroot,
- .system_libs = system_libs,
+ .windows_libs = windows_libs,
.version = options.version,
.libc_installation = libc_dirs.libc_installation,
.include_compiler_rt = include_compiler_rt,
- .objects = options.link_objects,
+ .link_inputs = options.link_inputs,
.framework_dirs = options.framework_dirs,
.llvm_opt_bisect_limit = options.llvm_opt_bisect_limit,
.skip_linker_dependencies = options.skip_linker_dependencies,
@@ -1564,7 +1524,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.z_max_page_size = options.linker_z_max_page_size,
.darwin_sdk_layout = libc_dirs.darwin_sdk_layout,
.frameworks = options.frameworks,
- .lib_dirs = options.lib_dirs,
+ .lib_directories = options.lib_directories,
.framework_dirs = options.framework_dirs,
.rpath_list = options.rpath_list,
.symbol_wrap_set = options.symbol_wrap_set,
@@ -1776,157 +1736,175 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.incremental => comp.bin_file != null,
};
- if (have_bin_emit and !comp.skip_linker_dependencies and target.ofmt != .c) {
- if (target.isDarwin()) {
- switch (target.abi) {
- .none,
- .simulator,
- .macabi,
- => {},
- else => return error.LibCUnavailable,
- }
- }
- // If we need to build glibc for the target, add work items for it.
- // We go through the work queue so that building can be done in parallel.
- if (comp.wantBuildGLibCFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
-
- if (glibc.needsCrtiCrtn(target)) {
- try comp.queueJobs(&[_]Job{
- .{ .glibc_crt_file = .crti_o },
- .{ .glibc_crt_file = .crtn_o },
- });
- }
- try comp.queueJobs(&[_]Job{
- .{ .glibc_crt_file = .scrt1_o },
- .{ .glibc_crt_file = .libc_nonshared_a },
- .{ .glibc_shared_objects = {} },
- });
- }
- if (comp.wantBuildMuslFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
-
- if (musl.needsCrtiCrtn(target)) {
- try comp.queueJobs(&[_]Job{
- .{ .musl_crt_file = .crti_o },
- .{ .musl_crt_file = .crtn_o },
- });
- }
- try comp.queueJobs(&[_]Job{
- .{ .musl_crt_file = .crt1_o },
- .{ .musl_crt_file = .scrt1_o },
- .{ .musl_crt_file = .rcrt1_o },
- switch (comp.config.link_mode) {
- .static => .{ .musl_crt_file = .libc_a },
- .dynamic => .{ .musl_crt_file = .libc_so },
- },
- });
- }
+ if (have_bin_emit and target.ofmt != .c) {
+ if (!comp.skip_linker_dependencies) {
+ // If we need to build libc for the target, add work items for it.
+ // We go through the work queue so that building can be done in parallel.
+ // If linking against host libc installation, instead queue up jobs
+ // for loading those files in the linker.
+ if (comp.config.link_libc and is_exe_or_dyn_lib) {
+ // If the "is darwin" check is moved below the libc_installation check below,
+ // error.LibCInstallationMissingCrtDir is returned from lci.resolveCrtPaths().
+ if (target.isDarwin()) {
+ switch (target.abi) {
+ .none, .simulator, .macabi => {},
+ else => return error.LibCUnavailable,
+ }
+ // TODO delete logic from MachO flush() and queue up tasks here instead.
+ } else if (comp.libc_installation) |lci| {
+ const basenames = LibCInstallation.CrtBasenames.get(.{
+ .target = target,
+ .link_libc = comp.config.link_libc,
+ .output_mode = comp.config.output_mode,
+ .link_mode = comp.config.link_mode,
+ .pie = comp.config.pie,
+ });
+ const paths = try lci.resolveCrtPaths(arena, basenames, target);
- if (comp.wantBuildWasiLibcFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+ const fields = @typeInfo(@TypeOf(paths)).@"struct".fields;
+ try comp.link_task_queue.shared.ensureUnusedCapacity(gpa, fields.len + 1);
+ inline for (fields) |field| {
+ if (@field(paths, field.name)) |path| {
+ comp.link_task_queue.shared.appendAssumeCapacity(.{ .load_object = path });
+ }
+ }
+ // Loads the libraries provided by `target_util.libcFullLinkFlags(target)`.
+ comp.link_task_queue.shared.appendAssumeCapacity(.load_host_libc);
+ } else if (target.isMusl() and !target.isWasm()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+
+ if (musl.needsCrtiCrtn(target)) {
+ try comp.queueJobs(&[_]Job{
+ .{ .musl_crt_file = .crti_o },
+ .{ .musl_crt_file = .crtn_o },
+ });
+ }
+ if (musl.needsCrt0(comp.config.output_mode, comp.config.link_mode, comp.config.pie)) |f| {
+ try comp.queueJobs(&.{.{ .musl_crt_file = f }});
+ }
+ try comp.queueJobs(&.{.{ .musl_crt_file = switch (comp.config.link_mode) {
+ .static => .libc_a,
+ .dynamic => .libc_so,
+ } }});
+ } else if (target.isGnuLibC()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+
+ if (glibc.needsCrtiCrtn(target)) {
+ try comp.queueJobs(&[_]Job{
+ .{ .glibc_crt_file = .crti_o },
+ .{ .glibc_crt_file = .crtn_o },
+ });
+ }
+ if (!is_dyn_lib) {
+ try comp.queueJob(.{ .glibc_crt_file = .scrt1_o });
+ }
+ try comp.queueJobs(&[_]Job{
+ .{ .glibc_shared_objects = {} },
+ .{ .glibc_crt_file = .libc_nonshared_a },
+ });
+ } else if (target.isWasm() and target.os.tag == .wasi) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- for (comp.wasi_emulated_libs) |crt_file| {
- try comp.queueJob(.{
- .wasi_libc_crt_file = crt_file,
- });
- }
- try comp.queueJobs(&[_]Job{
- .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
- .{ .wasi_libc_crt_file = .libc_a },
- });
- }
+ for (comp.wasi_emulated_libs) |crt_file| {
+ try comp.queueJob(.{
+ .wasi_libc_crt_file = crt_file,
+ });
+ }
+ try comp.queueJobs(&[_]Job{
+ .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
+ .{ .wasi_libc_crt_file = .libc_a },
+ });
+ } else if (target.isMinGW()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- if (comp.wantBuildMinGWFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+ const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
+ try comp.queueJobs(&.{
+ .{ .mingw_crt_file = .mingw32_lib },
+ crt_job,
+ });
- const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
- try comp.queueJobs(&.{
- .{ .mingw_crt_file = .mingw32_lib },
- crt_job,
- });
+ // When linking mingw-w64 there are some import libs we always need.
+ try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
+ for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(name, {});
+ } else if (target.isDarwin()) {
+ switch (target.abi) {
+ .none, .simulator, .macabi => {},
+ else => return error.LibCUnavailable,
+ }
+ } else if (target.os.tag == .freestanding and capable_of_building_zig_libc) {
+ try comp.queueJob(.{ .zig_libc = {} });
+ } else {
+ return error.LibCUnavailable;
+ }
+ }
- // When linking mingw-w64 there are some import libs we always need.
- for (mingw.always_link_libs) |name| {
- try comp.system_libs.put(comp.gpa, name, .{
- .needed = false,
- .weak = false,
- .path = null,
- });
+ // Generate Windows import libs.
+ if (target.os.tag == .windows) {
+ const count = comp.windows_libs.count();
+ for (0..count) |i| {
+ try comp.queueJob(.{ .windows_import_lib = i });
+ }
}
- }
- // Generate Windows import libs.
- if (target.os.tag == .windows) {
- const count = comp.system_libs.count();
- for (0..count) |i| {
- try comp.queueJob(.{ .windows_import_lib = i });
+ if (comp.wantBuildLibUnwindFromSource()) {
+ try comp.queueJob(.{ .libunwind = {} });
+ }
+ if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
+ try comp.queueJob(.libcxx);
+ try comp.queueJob(.libcxxabi);
+ }
+ if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.any_sanitize_thread) {
+ try comp.queueJob(.libtsan);
}
- }
- if (comp.wantBuildLibUnwindFromSource()) {
- try comp.queueJob(.{ .libunwind = {} });
- }
- if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
- try comp.queueJob(.libcxx);
- try comp.queueJob(.libcxxabi);
- }
- if (build_options.have_llvm and comp.config.any_sanitize_thread) {
- try comp.queueJob(.libtsan);
- }
- if (target.isMinGW() and comp.config.any_non_single_threaded) {
- // LLD might drop some symbols as unused during LTO and GCing, therefore,
- // we force mark them for resolution here.
+ if (target.isMinGW() and comp.config.any_non_single_threaded) {
+ // LLD might drop some symbols as unused during LTO and GCing, therefore,
+ // we force mark them for resolution here.
- const tls_index_sym = switch (target.cpu.arch) {
- .x86 => "__tls_index",
- else => "_tls_index",
- };
+ const tls_index_sym = switch (target.cpu.arch) {
+ .x86 => "__tls_index",
+ else => "_tls_index",
+ };
- try comp.force_undefined_symbols.put(comp.gpa, tls_index_sym, {});
- }
+ try comp.force_undefined_symbols.put(comp.gpa, tls_index_sym, {});
+ }
- if (comp.include_compiler_rt and capable_of_building_compiler_rt) {
- if (is_exe_or_dyn_lib) {
- log.debug("queuing a job to build compiler_rt_lib", .{});
- comp.job_queued_compiler_rt_lib = true;
- } else if (output_mode != .Obj) {
- log.debug("queuing a job to build compiler_rt_obj", .{});
- // In this case we are making a static library, so we ask
- // for a compiler-rt object to put in it.
- comp.job_queued_compiler_rt_obj = true;
+ if (comp.include_compiler_rt and capable_of_building_compiler_rt) {
+ if (is_exe_or_dyn_lib) {
+ log.debug("queuing a job to build compiler_rt_lib", .{});
+ comp.job_queued_compiler_rt_lib = true;
+ } else if (output_mode != .Obj) {
+ log.debug("queuing a job to build compiler_rt_obj", .{});
+ // In this case we are making a static library, so we ask
+ // for a compiler-rt object to put in it.
+ comp.job_queued_compiler_rt_obj = true;
+ }
}
- }
- if (comp.config.any_fuzz and capable_of_building_compiler_rt) {
- if (is_exe_or_dyn_lib) {
+ if (is_exe_or_dyn_lib and comp.config.any_fuzz and capable_of_building_compiler_rt) {
log.debug("queuing a job to build libfuzzer", .{});
comp.job_queued_fuzzer_lib = true;
}
}
- if (!comp.skip_linker_dependencies and is_exe_or_dyn_lib and
- !comp.config.link_libc and capable_of_building_zig_libc)
- {
- try comp.queueJob(.{ .zig_libc = {} });
- }
+ try comp.link_task_queue.shared.append(gpa, .load_explicitly_provided);
}
return comp;
}
pub fn destroy(comp: *Compilation) void {
+ const gpa = comp.gpa;
+
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
for (comp.work_queues) |work_queue| work_queue.deinit();
- if (!InternPool.single_threaded) comp.codegen_work.queue.deinit();
comp.c_object_work_queue.deinit();
comp.win32_resource_work_queue.deinit();
comp.astgen_work_queue.deinit();
comp.embed_file_work_queue.deinit();
- const gpa = comp.gpa;
- comp.system_libs.deinit(gpa);
+ comp.windows_libs.deinit(gpa);
{
var it = comp.crt_files.iterator();
@@ -2559,12 +2537,7 @@ fn addNonIncrementalStuffToCacheManifest(
cache_helpers.addModule(&man.hash, comp.root_mod);
}
- for (comp.objects) |obj| {
- _ = try man.addFilePath(obj.path, null);
- man.hash.add(obj.must_link);
- man.hash.add(obj.needed);
- man.hash.add(obj.loption);
- }
+ try link.hashInputs(man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.src.src_path, null);
@@ -2601,7 +2574,7 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.add(comp.rc_includes);
man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
man.hash.addListOfBytes(comp.framework_dirs);
- try link.hashAddSystemLibs(man, comp.system_libs);
+ man.hash.addListOfBytes(comp.windows_libs.keys());
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_asm);
cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir);
@@ -2620,12 +2593,16 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.addOptional(opts.image_base);
man.hash.addOptional(opts.gc_sections);
man.hash.add(opts.emit_relocs);
- man.hash.addListOfBytes(opts.lib_dirs);
+ const target = comp.root_mod.resolved_target.result;
+ if (target.ofmt == .macho or target.ofmt == .coff) {
+ // TODO remove this, libraries need to be resolved by the frontend. this is already
+ // done by ELF.
+ for (opts.lib_directories) |lib_directory| man.hash.addOptionalBytes(lib_directory.path);
+ }
man.hash.addListOfBytes(opts.rpath_list);
man.hash.addListOfBytes(opts.symbol_wrap_set.keys());
if (comp.config.link_libc) {
man.hash.add(comp.libc_installation != null);
- const target = comp.root_mod.resolved_target.result;
if (comp.libc_installation) |libc_installation| {
man.hash.addOptionalBytes(libc_installation.crt_dir);
if (target.abi == .msvc or target.abi == .itanium) {
@@ -3219,18 +3196,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}));
}
- for (comp.link_diags.msgs.items) |link_err| {
- try bundle.addRootErrorMessage(.{
- .msg = try bundle.addString(link_err.msg),
- .notes_len = @intCast(link_err.notes.len),
- });
- const notes_start = try bundle.reserveNotes(@intCast(link_err.notes.len));
- for (link_err.notes, 0..) |note, i| {
- bundle.extra.items[notes_start + i] = @intFromEnum(try bundle.addErrorMessage(.{
- .msg = try bundle.addString(note.msg),
- }));
- }
- }
+ try comp.link_diags.addMessagesToBundle(&bundle);
if (comp.zcu) |zcu| {
if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
@@ -3482,6 +3448,9 @@ pub fn performAllTheWork(
comp: *Compilation,
main_progress_node: std.Progress.Node,
) JobError!void {
+ comp.work_queue_progress_node = main_progress_node;
+ defer comp.work_queue_progress_node = .none;
+
defer if (comp.zcu) |zcu| {
zcu.sema_prog_node.end();
zcu.sema_prog_node = std.Progress.Node.none;
@@ -3491,7 +3460,6 @@ pub fn performAllTheWork(
zcu.generation += 1;
};
try comp.performAllTheWorkInner(main_progress_node);
- if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;
}
fn performAllTheWorkInner(
@@ -3506,12 +3474,34 @@ fn performAllTheWorkInner(
var work_queue_wait_group: WaitGroup = .{};
defer work_queue_wait_group.wait();
+ comp.link_task_wait_group.reset();
+ defer comp.link_task_wait_group.wait();
+
+ if (comp.link_task_queue.start()) {
+ comp.thread_pool.spawnWgId(&comp.link_task_wait_group, link.flushTaskQueue, .{comp});
+ }
+
if (comp.docs_emit != null) {
dev.check(.docs_emit);
comp.thread_pool.spawnWg(&work_queue_wait_group, workerDocsCopy, .{comp});
work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node });
}
+ if (comp.job_queued_compiler_rt_lib) {
+ comp.job_queued_compiler_rt_lib = false;
+ comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
+ }
+
+ if (comp.job_queued_compiler_rt_obj) {
+ comp.job_queued_compiler_rt_obj = false;
+ comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
+ }
+
+ if (comp.job_queued_fuzzer_lib) {
+ comp.job_queued_fuzzer_lib = false;
+ comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
+ }
+
{
const astgen_frame = tracy.namedFrame("astgen");
defer astgen_frame.end();
@@ -3574,22 +3564,18 @@ fn performAllTheWorkInner(
}
while (comp.c_object_work_queue.readItem()) |c_object| {
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerUpdateCObject, .{
+ comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
comp, c_object, main_progress_node,
});
}
while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerUpdateWin32Resource, .{
+ comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
comp, win32_resource, main_progress_node,
});
}
}
- if (comp.job_queued_compiler_rt_lib) work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
- if (comp.job_queued_compiler_rt_obj) work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
- if (comp.job_queued_fuzzer_lib) work_queue_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
-
if (comp.zcu) |zcu| {
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
if (comp.incremental) {
@@ -3604,18 +3590,12 @@ fn performAllTheWorkInner(
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
}
- if (!InternPool.single_threaded) {
- comp.codegen_work.done = false; // may be `true` from a prior update
- comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
+ if (!comp.separateCodegenThreadOk()) {
+ // Waits until all input files have been parsed.
+ comp.link_task_wait_group.wait();
+ comp.link_task_wait_group.reset();
+ std.log.scoped(.link).debug("finished waiting for link_task_wait_group", .{});
}
- defer if (!InternPool.single_threaded) {
- {
- comp.codegen_work.mutex.lock();
- defer comp.codegen_work.mutex.unlock();
- comp.codegen_work.done = true;
- }
- comp.codegen_work.cond.signal();
- };
work: while (true) {
for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
@@ -3659,16 +3639,14 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
}
}
assert(nav.status == .resolved);
- try comp.queueCodegenJob(tid, .{ .nav = nav_index });
+ comp.dispatchCodegenTask(tid, .{ .codegen_nav = nav_index });
},
.codegen_func => |func| {
- // This call takes ownership of `func.air`.
- try comp.queueCodegenJob(tid, .{ .func = .{
- .func = func.func,
- .air = func.air,
- } });
+ comp.dispatchCodegenTask(tid, .{ .codegen_func = func });
+ },
+ .codegen_type => |ty| {
+ comp.dispatchCodegenTask(tid, .{ .codegen_type = ty });
},
- .codegen_type => |ty| try comp.queueCodegenJob(tid, .{ .type = ty }),
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
@@ -3715,31 +3693,6 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
error.AnalysisFail => return,
};
},
- .update_line_number => |decl_index| {
- const named_frame = tracy.namedFrame("update_line_number");
- defer named_frame.end();
-
- if (true) @panic("TODO: update_line_number");
-
- const gpa = comp.gpa;
- const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
- const decl = pt.zcu.declPtr(decl_index);
- const lf = comp.bin_file.?;
- lf.updateDeclLineNumber(pt, decl_index) catch |err| {
- try pt.zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- pt.zcu.failed_analysis.putAssumeCapacityNoClobber(
- InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
- try Zcu.ErrorMsg.create(
- gpa,
- decl.navSrcLoc(pt.zcu),
- "unable to update line number: {s}",
- .{@errorName(err)},
- ),
- );
- decl.analysis = .codegen_failure;
- try pt.zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
- };
- },
.analyze_mod => |mod| {
const named_frame = tracy.namedFrame("analyze_mod");
defer named_frame.end();
@@ -3804,7 +3757,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
const named_frame = tracy.namedFrame("windows_import_lib");
defer named_frame.end();
- const link_lib = comp.system_libs.keys()[index];
+ const link_lib = comp.windows_libs.keys()[index];
mingw.buildImportLib(comp, link_lib) catch |err| {
// TODO Surface more error details.
comp.lockAndSetMiscFailure(
@@ -3906,66 +3859,20 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
}
}
-fn queueCodegenJob(comp: *Compilation, tid: usize, codegen_job: CodegenJob) !void {
- if (InternPool.single_threaded or
- !comp.zcu.?.backendSupportsFeature(.separate_thread))
- return processOneCodegenJob(tid, comp, codegen_job);
-
- {
- comp.codegen_work.mutex.lock();
- defer comp.codegen_work.mutex.unlock();
- try comp.codegen_work.queue.writeItem(codegen_job);
+/// The reason for the double-queue here is that the first queue ensures any
+/// resolve_type_fully tasks are complete before this dispatch function is called.
+fn dispatchCodegenTask(comp: *Compilation, tid: usize, link_task: link.Task) void {
+ if (comp.separateCodegenThreadOk()) {
+ comp.queueLinkTasks(&.{link_task});
+ } else {
+ link.doTask(comp, tid, link_task);
}
- comp.codegen_work.cond.signal();
}
-fn codegenThread(tid: usize, comp: *Compilation) void {
- comp.codegen_work.mutex.lock();
- defer comp.codegen_work.mutex.unlock();
-
- while (true) {
- if (comp.codegen_work.queue.readItem()) |codegen_job| {
- comp.codegen_work.mutex.unlock();
- defer comp.codegen_work.mutex.lock();
-
- processOneCodegenJob(tid, comp, codegen_job) catch |job_error| {
- comp.codegen_work.job_error = job_error;
- break;
- };
- continue;
- }
-
- if (comp.codegen_work.done) break;
-
- comp.codegen_work.cond.wait(&comp.codegen_work.mutex);
- }
-}
-
-fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob) JobError!void {
- switch (codegen_job) {
- .nav => |nav_index| {
- const named_frame = tracy.namedFrame("codegen_nav");
- defer named_frame.end();
-
- const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
- try pt.linkerUpdateNav(nav_index);
- },
- .func => |func| {
- const named_frame = tracy.namedFrame("codegen_func");
- defer named_frame.end();
-
- const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
- // This call takes ownership of `func.air`.
- try pt.linkerUpdateFunc(func.func, func.air);
- },
- .type => |ty| {
- const named_frame = tracy.namedFrame("codegen_type");
- defer named_frame.end();
-
- const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
- try pt.linkerUpdateContainerType(ty);
- },
- }
+fn separateCodegenThreadOk(comp: *const Compilation) bool {
+ if (InternPool.single_threaded) return false;
+ const zcu = comp.zcu orelse return true;
+ return zcu.backendSupportsFeature(.separate_thread);
}
fn workerDocsCopy(comp: *Compilation) void {
@@ -4717,7 +4624,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// file and building an object we need to link them together, but with just one it should go
// directly to the output file.
const direct_o = comp.c_source_files.len == 1 and comp.zcu == null and
- comp.config.output_mode == .Obj and comp.objects.len == 0;
+ comp.config.output_mode == .Obj and !link.anyObjectInputs(comp.link_inputs);
const o_basename_noext = if (direct_o)
comp.root_name
else
@@ -4956,7 +4863,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// the contents were the same, we hit the cache but the manifest is dirty and we need to update
// it to prevent doing a full file content comparison the next time around.
man.writeManifest() catch |err| {
- log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ c_object.src.src_path, @errorName(err) });
+ log.warn("failed to write cache manifest when compiling '{s}': {s}", .{
+ c_object.src.src_path, @errorName(err),
+ });
};
}
@@ -4971,6 +4880,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
.lock = man.toOwnedLock(),
},
};
+
+ comp.queueLinkTasks(&.{.{ .load_object = c_object.status.success.object_path }});
}
fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: std.Progress.Node) !void {
@@ -6075,8 +5986,8 @@ test "classifyFileExt" {
try std.testing.expectEqual(FileExt.zig, classifyFileExt("foo.zig"));
}
-pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) !Path {
- return (try crtFilePath(comp, basename)) orelse {
+fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) !Path {
+ return (try crtFilePath(&comp.crt_files, basename)) orelse {
const lci = comp.libc_installation orelse return error.LibCInstallationNotAvailable;
const crt_dir_path = lci.crt_dir orelse return error.LibCInstallationMissingCrtDir;
const full_path = try std.fs.path.join(arena, &[_][]const u8{ crt_dir_path, basename });
@@ -6089,40 +6000,11 @@ pub fn crtFileAsString(comp: *Compilation, arena: Allocator, basename: []const u
return path.toString(arena);
}
-pub fn crtFilePath(comp: *Compilation, basename: []const u8) Allocator.Error!?Path {
- const crt_file = comp.crt_files.get(basename) orelse return null;
+fn crtFilePath(crt_files: *std.StringHashMapUnmanaged(CrtFile), basename: []const u8) Allocator.Error!?Path {
+ const crt_file = crt_files.get(basename) orelse return null;
return crt_file.full_object_path;
}
-fn wantBuildLibCFromSource(comp: Compilation) bool {
- const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
- .Obj => false,
- .Lib => comp.config.link_mode == .dynamic,
- .Exe => true,
- };
- const ofmt = comp.root_mod.resolved_target.result.ofmt;
- return comp.config.link_libc and is_exe_or_dyn_lib and
- comp.libc_installation == null and ofmt != .c;
-}
-
-fn wantBuildGLibCFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isGnuLibC();
-}
-
-fn wantBuildMuslFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isMusl() and
- !comp.getTarget().isWasm();
-}
-
-fn wantBuildWasiLibcFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isWasm() and
- comp.getTarget().os.tag == .wasi;
-}
-
-fn wantBuildMinGWFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isMinGW();
-}
-
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
.Obj => false,
@@ -6133,7 +6015,7 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
return is_exe_or_dyn_lib and comp.config.link_libunwind and ofmt != .c;
}
-fn setAllocFailure(comp: *Compilation) void {
+pub fn setAllocFailure(comp: *Compilation) void {
@branchHint(.cold);
log.debug("memory allocation failure", .{});
comp.alloc_failure_occurred = true;
@@ -6370,9 +6252,11 @@ fn buildOutputFromZig(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
- // Under incremental compilation, `out` may already be populated from a prior update.
- assert(out.* == null or comp.incremental);
- out.* = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ assert(out.* == null);
+ out.* = crt_file;
+
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn build_crt_file(
@@ -6479,8 +6363,33 @@ pub fn build_crt_file(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
- try comp.crt_files.ensureUnusedCapacity(gpa, 1);
- comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile());
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
+
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try comp.crt_files.ensureUnusedCapacity(gpa, 1);
+ comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
+ }
+}
+
+pub fn queueLinkTaskMode(comp: *Compilation, path: Path, output_mode: std.builtin.OutputMode) void {
+ comp.queueLinkTasks(switch (output_mode) {
+ .Exe => unreachable,
+ .Obj => &.{.{ .load_object = path }},
+ .Lib => &.{.{ .load_archive = path }},
+ });
+}
+
+/// Only valid to call during `update`. Automatically handles queuing up a
+/// linker worker task if there is not already one.
+pub fn queueLinkTasks(comp: *Compilation, tasks: []const link.Task) void {
+ if (comp.link_task_queue.enqueue(comp.gpa, tasks) catch |err| switch (err) {
+ error.OutOfMemory => return comp.setAllocFailure(),
+ }) {
+ comp.thread_pool.spawnWgId(&comp.link_task_wait_group, link.flushTaskQueue, .{comp});
+ }
}
pub fn toCrtFile(comp: *Compilation) Allocator.Error!CrtFile {
@@ -6498,21 +6407,31 @@ pub fn getCrtPaths(
arena: Allocator,
) error{ OutOfMemory, LibCInstallationMissingCrtDir }!LibCInstallation.CrtPaths {
const target = comp.root_mod.resolved_target.result;
+ return getCrtPathsInner(arena, target, comp.config, comp.libc_installation, &comp.crt_files);
+}
+
+fn getCrtPathsInner(
+ arena: Allocator,
+ target: std.Target,
+ config: Config,
+ libc_installation: ?*const LibCInstallation,
+ crt_files: *std.StringHashMapUnmanaged(CrtFile),
+) error{ OutOfMemory, LibCInstallationMissingCrtDir }!LibCInstallation.CrtPaths {
const basenames = LibCInstallation.CrtBasenames.get(.{
.target = target,
- .link_libc = comp.config.link_libc,
- .output_mode = comp.config.output_mode,
- .link_mode = comp.config.link_mode,
- .pie = comp.config.pie,
+ .link_libc = config.link_libc,
+ .output_mode = config.output_mode,
+ .link_mode = config.link_mode,
+ .pie = config.pie,
});
- if (comp.libc_installation) |lci| return lci.resolveCrtPaths(arena, basenames, target);
+ if (libc_installation) |lci| return lci.resolveCrtPaths(arena, basenames, target);
return .{
- .crt0 = if (basenames.crt0) |basename| try comp.crtFilePath(basename) else null,
- .crti = if (basenames.crti) |basename| try comp.crtFilePath(basename) else null,
- .crtbegin = if (basenames.crtbegin) |basename| try comp.crtFilePath(basename) else null,
- .crtend = if (basenames.crtend) |basename| try comp.crtFilePath(basename) else null,
- .crtn = if (basenames.crtn) |basename| try comp.crtFilePath(basename) else null,
+ .crt0 = if (basenames.crt0) |basename| try crtFilePath(crt_files, basename) else null,
+ .crti = if (basenames.crti) |basename| try crtFilePath(crt_files, basename) else null,
+ .crtbegin = if (basenames.crtbegin) |basename| try crtFilePath(crt_files, basename) else null,
+ .crtend = if (basenames.crtend) |basename| try crtFilePath(crt_files, basename) else null,
+ .crtn = if (basenames.crtn) |basename| try crtFilePath(crt_files, basename) else null,
};
}
@@ -6522,24 +6441,14 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
// then when we create a sub-Compilation for zig libc, it also tries to
// build kernel32.lib.
if (comp.skip_linker_dependencies) return;
+ const target = comp.root_mod.resolved_target.result;
+ if (target.os.tag != .windows or target.ofmt == .c) return;
// This happens when an `extern "foo"` function is referenced.
// If we haven't seen this library yet and we're targeting Windows, we need
// to queue up a work item to produce the DLL import library for this.
- const gop = try comp.system_libs.getOrPut(comp.gpa, lib_name);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{
- .needed = true,
- .weak = false,
- .path = null,
- };
- const target = comp.root_mod.resolved_target.result;
- if (target.os.tag == .windows and target.ofmt != .c) {
- try comp.queueJob(.{
- .windows_import_lib = comp.system_libs.count() - 1,
- });
- }
- }
+ const gop = try comp.windows_libs.getOrPut(comp.gpa, lib_name);
+ if (!gop.found_existing) try comp.queueJob(.{ .windows_import_lib = comp.windows_libs.count() - 1 });
}
/// This decides the optimization mode for all zig-provided libraries, including
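A minimal standalone sketch of the producer contract that `queueLinkTasks` above relies on, assuming the `ThreadSafeQueue` introduced later in this diff is importable as "ThreadSafeQueue.zig"; `std.Thread.spawn` and the string task type stand in for `comp.thread_pool.spawnWgId` and `link.Task`.

const std = @import("std");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;

// Stand-in for link.flushTaskQueue: drain batches until the queue reports
// empty and flips back to the waiting state.
fn flushTasks(queue: *ThreadSafeQueue([]const u8)) void {
    while (queue.check()) |tasks| {
        for (tasks) |task| std.debug.print("link task: {s}\n", .{task});
    }
}

pub fn main() !void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var queue: ThreadSafeQueue([]const u8) = .empty;
    defer queue.deinit(gpa);

    // Mirrors queueLinkTasks: `enqueue` returns true only when the worker is
    // idle (state was `.wait`), which is the one moment a new worker must be
    // spawned; otherwise the already-running worker picks the items up on its
    // next check().
    if (try queue.enqueue(gpa, &.{ "load_object crt1.o", "load_archive libc.a" })) {
        var worker = try std.Thread.spawn(.{}, flushTasks, .{&queue});
        worker.join();
    }
}

Returning a bool from `enqueue` keeps at most one worker run alive at a time, which is what the `link_task_queue_safety` lock in Compilation.zig is there to check.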
diff --git a/src/Sema.zig b/src/Sema.zig
index cd32c989ee..63af9686a0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2899,6 +2899,7 @@ fn zirStructDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -3149,6 +3150,7 @@ fn zirEnumDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@@ -3272,6 +3274,7 @@ fn zirUnionDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -3357,6 +3360,7 @@ fn zirOpaqueDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -9595,7 +9599,7 @@ fn resolveGenericBody(
}
/// Given a library name, examines if the library name should end up in
-/// `link.File.Options.system_libs` table (for example, libc is always
+/// `link.File.Options.windows_libs` table (for example, libc is always
/// specified via dedicated flag `link_libc` instead),
/// and puts it there if it doesn't exist.
/// It also dupes the library name which can then be saved as part of the
@@ -22456,6 +22460,7 @@ fn reifyEnum(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@@ -22713,6 +22718,7 @@ fn reifyUnion(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -22997,6 +23003,7 @@ fn reifyStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
diff --git a/src/ThreadSafeQueue.zig b/src/ThreadSafeQueue.zig
new file mode 100644
index 0000000000..74bbdc418f
--- /dev/null
+++ b/src/ThreadSafeQueue.zig
@@ -0,0 +1,72 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+pub fn ThreadSafeQueue(comptime T: type) type {
+ return struct {
+ worker_owned: std.ArrayListUnmanaged(T),
+ /// Protected by `mutex`.
+ shared: std.ArrayListUnmanaged(T),
+ mutex: std.Thread.Mutex,
+ state: State,
+
+ const Self = @This();
+
+ pub const State = enum { wait, run };
+
+ pub const empty: Self = .{
+ .worker_owned = .empty,
+ .shared = .empty,
+ .mutex = .{},
+ .state = .wait,
+ };
+
+ pub fn deinit(self: *Self, gpa: Allocator) void {
+ self.worker_owned.deinit(gpa);
+ self.shared.deinit(gpa);
+ self.* = undefined;
+ }
+
+ /// Must be called from the worker thread.
+ pub fn check(self: *Self) ?[]T {
+ assert(self.worker_owned.items.len == 0);
+ {
+ self.mutex.lock();
+ defer self.mutex.unlock();
+ assert(self.state == .run);
+ if (self.shared.items.len == 0) {
+ self.state = .wait;
+ return null;
+ }
+ std.mem.swap(std.ArrayListUnmanaged(T), &self.worker_owned, &self.shared);
+ }
+ const result = self.worker_owned.items;
+ self.worker_owned.clearRetainingCapacity();
+ return result;
+ }
+
+ /// Adds items to the queue, returning true if and only if the worker
+ /// thread is waiting. Thread-safe.
+ /// Not safe to call from the worker thread.
+ pub fn enqueue(self: *Self, gpa: Allocator, items: []const T) error{OutOfMemory}!bool {
+ self.mutex.lock();
+ defer self.mutex.unlock();
+ try self.shared.appendSlice(gpa, items);
+ return switch (self.state) {
+ .run => false,
+ .wait => {
+ self.state = .run;
+ return true;
+ },
+ };
+ }
+
+ /// Safe only to call exactly once when initially starting the worker.
+ pub fn start(self: *Self) bool {
+ assert(self.state == .wait);
+ if (self.shared.items.len == 0) return false;
+ self.state = .run;
+ return true;
+ }
+ };
+}
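A minimal standalone sketch of how this queue is driven, mirroring the usage visible in the Compilation.zig hunks above: the frontend seeds `shared` directly before any worker exists, `start()` is called once when the work loop begins, and the worker drains batches with `check()` until it returns null. The integer task type and `std.Thread.spawn` stand in for `link.Task` and the compiler's thread pool.

const std = @import("std");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;

fn worker(queue: *ThreadSafeQueue(u32)) void {
    // Each call to check() hands back one batch; null means the queue is
    // empty again and the state has flipped back to `.wait`.
    while (queue.check()) |batch| {
        for (batch) |task| std.debug.print("task {d}\n", .{task});
    }
}

pub fn main() !void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var queue: ThreadSafeQueue(u32) = .empty;
    defer queue.deinit(gpa);

    // Before the worker exists it is fine to append to `shared` directly,
    // as Compilation.create does for the initial load_* tasks.
    try queue.shared.appendSlice(gpa, &.{ 1, 2, 3 });

    // Called exactly once when work begins (cf. performAllTheWorkInner):
    // spawn the worker only if anything was queued.
    if (queue.start()) {
        var thread = try std.Thread.spawn(.{}, worker, .{&queue});
        thread.join();
    }
}

Note that `check()` hands a whole batch to the worker at once by swapping the two lists, so the mutex is held only for the swap, not while tasks run.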
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 4ec9306792..db1ee319dc 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -845,6 +845,7 @@ fn ensureFuncBodyAnalyzedInner(
return .{ .ies_outdated = ies_outdated };
}
+ // This job depends on any resolve_type_fully jobs queued up before it.
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
@@ -1016,6 +1017,7 @@ fn createFileRootStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.strip) break :codegen_type;
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
@@ -1362,6 +1364,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
if (file.mod.strip) break :queue_codegen;
}
+ // This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_nav = nav_index });
}
@@ -2593,7 +2596,7 @@ pub fn populateTestFunctions(
}
}
-pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
+pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
@@ -3163,6 +3166,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index {
const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
if (result.new_nav.unwrap()) |nav| {
+ // This job depends on any resolve_type_fully jobs queued up before it.
try pt.zcu.comp.queueJob(.{ .codegen_nav = nav });
}
return result.index;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 61eaf373f5..24497defa2 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -133,7 +133,7 @@ const Owner = union(enum) {
switch (owner) {
.nav_index => |nav_index| {
const elf_file = func.bin_file.cast(.elf).?;
- return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, nav_index);
+ return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(pt.zcu, nav_index);
},
.lazy_sym => |lazy_sym| {
const elf_file = func.bin_file.cast(.elf).?;
@@ -5002,7 +5002,7 @@ fn genCall(
.func => |func_val| {
if (func.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
- const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func_val.owner_nav);
+ const sym_index = try zo.getOrCreateMetadataForNav(zcu, func_val.owner_nav);
if (func.mod.pic) {
return func.fail("TODO: genCall pic", .{});
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 10260ea465..7bd3517fac 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -126,7 +126,7 @@ const Owner = union(enum) {
const pt = ctx.pt;
switch (owner) {
.nav_index => |nav_index| if (ctx.bin_file.cast(.elf)) |elf_file| {
- return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, nav_index);
+ return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(pt.zcu, nav_index);
} else if (ctx.bin_file.cast(.macho)) |macho_file| {
return macho_file.getZigObject().?.getOrCreateMetadataForNav(macho_file, nav_index);
} else if (ctx.bin_file.cast(.coff)) |coff_file| {
@@ -12605,7 +12605,7 @@ fn genCall(self: *Self, info: union(enum) {
.func => |func| {
if (self.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
- const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav);
+ const sym_index = try zo.getOrCreateMetadataForNav(zcu, func.owner_nav);
try self.asmImmediate(.{ ._, .call }, Immediate.rel(.{ .sym_index = sym_index }));
} else if (self.bin_file.cast(.coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForNav(func.owner_nav);
diff --git a/src/codegen.zig b/src/codegen.zig
index b41fa3c2ef..287d0bad51 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -866,7 +866,7 @@ fn genNavRef(
zo.symbol(sym_index).flags.is_extern_ptr = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
}
- const sym_index = try zo.getOrCreateMetadataForNav(elf_file, nav_index);
+ const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (!single_threaded and is_threadlocal) {
return .{ .mcv = .{ .load_tlv = sym_index } };
}
diff --git a/src/glibc.zig b/src/glibc.zig
index 62f1fbbb13..a7736a9827 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -6,12 +6,14 @@ const fs = std.fs;
const path = fs.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
+const Path = std.Build.Cache.Path;
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
const Cache = std.Build.Cache;
const Module = @import("Package/Module.zig");
+const link = @import("link.zig");
pub const Lib = struct {
name: []const u8,
@@ -717,11 +719,11 @@ fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const
pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
- dir_path: []u8,
+ dir_path: Path,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
- gpa.free(self.dir_path);
+ gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
};
@@ -742,7 +744,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
- var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
+ const gpa = comp.gpa;
+
+ var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -751,7 +755,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
// Use the global cache directory.
var cache: Cache = .{
- .gpa = comp.gpa,
+ .gpa = gpa,
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
@@ -772,12 +776,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
if (try man.hit()) {
const digest = man.final();
- assert(comp.glibc_so_files == null);
- comp.glibc_so_files = BuiltSharedObjects{
+ return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
- .dir_path = try comp.global_cache_directory.join(comp.gpa, &.{ "o", &digest }),
- };
- return;
+ .dir_path = .{
+ .root_dir = comp.global_cache_directory,
+ .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ },
+ });
}
const digest = man.final();
@@ -790,8 +795,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
defer o_directory.handle.close();
const abilists_contents = man.files.keys()[abilists_index].contents.?;
- const metadata = try loadMetaData(comp.gpa, abilists_contents);
- defer metadata.destroy(comp.gpa);
+ const metadata = try loadMetaData(gpa, abilists_contents);
+ defer metadata.destroy(gpa);
const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
if (targ.arch == target.cpu.arch and
@@ -835,7 +840,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
- var stubs_asm = std.ArrayList(u8).init(comp.gpa);
+ var stubs_asm = std.ArrayList(u8).init(gpa);
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
@@ -1195,7 +1200,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
-
try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib, prog_node);
}
@@ -1203,14 +1207,45 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
log.warn("failed to write cache manifest for glibc stubs: {s}", .{@errorName(err)});
};
- assert(comp.glibc_so_files == null);
- comp.glibc_so_files = BuiltSharedObjects{
+ return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
- .dir_path = try comp.global_cache_directory.join(comp.gpa, &.{ "o", &digest }),
- };
+ .dir_path = .{
+ .root_dir = comp.global_cache_directory,
+ .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ },
+ });
}
-// zig fmt: on
+fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
+ const target_version = comp.getTarget().os.version_range.linux.glibc;
+
+ assert(comp.glibc_so_files == null);
+ comp.glibc_so_files = so_files;
+
+ var task_buffer: [libs.len]link.Task = undefined;
+ var task_buffer_i: usize = 0;
+
+ {
+ comp.mutex.lock(); // protect comp.arena
+ defer comp.mutex.unlock();
+
+ for (libs) |lib| {
+ if (lib.removed_in) |rem_in| {
+ if (target_version.order(rem_in) != .lt) continue;
+ }
+ const so_path: Path = .{
+ .root_dir = so_files.dir_path.root_dir,
+ .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
+ so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+ }) catch return comp.setAllocFailure(),
+ };
+ task_buffer[task_buffer_i] = .{ .load_dso = so_path };
+ task_buffer_i += 1;
+ }
+ }
+
+ comp.queueLinkTasks(task_buffer[0..task_buffer_i]);
+}
fn buildSharedLib(
comp: *Compilation,
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 11deff91c9..a9f3030c42 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -355,7 +355,9 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
};
assert(comp.libcxx_static_lib == null);
- comp.libcxx_static_lib = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.libcxx_static_lib = crt_file;
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
@@ -584,7 +586,9 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
assert(comp.libcxxabi_static_lib == null);
- comp.libcxxabi_static_lib = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.libcxxabi_static_lib = crt_file;
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn hardeningModeFlag(optimize_mode: std.builtin.OptimizeMode) []const u8 {
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 63131def00..d078fa2a38 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -342,8 +342,10 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
},
};
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.tsan_lib == null);
- comp.tsan_lib = try sub_compilation.toCrtFile();
+ comp.tsan_lib = crt_file;
}
const tsan_sources = [_][]const u8{
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 5eb19e8d67..fba604e725 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -199,8 +199,10 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
},
};
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.libunwind_static_lib == null);
- comp.libunwind_static_lib = try sub_compilation.toCrtFile();
+ comp.libunwind_static_lib = crt_file;
}
const unwind_src_list = [_][]const u8{
diff --git a/src/link.zig b/src/link.zig
index 7280ce3df7..cd777b86c4 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -12,6 +12,7 @@ const Air = @import("Air.zig");
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const Path = std.Build.Cache.Path;
+const Directory = std.Build.Cache.Directory;
const Compilation = @import("Compilation.zig");
const LibCInstallation = std.zig.LibCInstallation;
const Liveness = @import("Liveness.zig");
@@ -23,19 +24,10 @@ const LlvmObject = @import("codegen/llvm.zig").Object;
const lldMain = @import("main.zig").lldMain;
const Package = @import("Package.zig");
const dev = @import("dev.zig");
+const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;
+const target_util = @import("target.zig");
-/// When adding a new field, remember to update `hashAddSystemLibs`.
-/// These are *always* dynamically linked. Static libraries will be
-/// provided as positional arguments.
-pub const SystemLib = struct {
- needed: bool,
- weak: bool,
- /// This can be null in two cases right now:
- /// 1. Windows DLLs that zig ships such as advapi32.
- /// 2. extern "foo" fn declarations where we find out about libraries too late
- /// TODO: make this non-optional and resolve those two cases somehow.
- path: ?Path,
-};
+pub const LdScript = @import("link/LdScript.zig");
pub const Diags = struct {
/// Stored here so that function definitions can distinguish between
@@ -336,20 +328,22 @@ pub const Diags = struct {
log.debug("memory allocation failure", .{});
diags.flags.alloc_failure_occurred = true;
}
-};
-pub fn hashAddSystemLibs(
- man: *Cache.Manifest,
- hm: std.StringArrayHashMapUnmanaged(SystemLib),
-) !void {
- const keys = hm.keys();
- man.hash.addListOfBytes(keys);
- for (hm.values()) |value| {
- man.hash.add(value.needed);
- man.hash.add(value.weak);
- if (value.path) |p| _ = try man.addFilePath(p, null);
+ pub fn addMessagesToBundle(diags: *const Diags, bundle: *std.zig.ErrorBundle.Wip) Allocator.Error!void {
+ for (diags.msgs.items) |link_err| {
+ try bundle.addRootErrorMessage(.{
+ .msg = try bundle.addString(link_err.msg),
+ .notes_len = @intCast(link_err.notes.len),
+ });
+ const notes_start = try bundle.reserveNotes(@intCast(link_err.notes.len));
+ for (link_err.notes, 0..) |note, i| {
+ bundle.extra.items[notes_start + i] = @intFromEnum(try bundle.addErrorMessage(.{
+ .msg = try bundle.addString(note.msg),
+ }));
+ }
+ }
}
-}
+};
pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version;
@@ -438,7 +432,7 @@ pub const File = struct {
compatibility_version: ?std.SemanticVersion,
// TODO: remove this. libraries are resolved by the frontend.
- lib_dirs: []const []const u8,
+ lib_directories: []const Directory,
framework_dirs: []const []const u8,
rpath_list: []const []const u8,
@@ -1003,6 +997,102 @@ pub const File = struct {
}
}
+ /// Opens a path as an object file and parses it into the linker.
+ fn openLoadObject(base: *File, path: Path) anyerror!void {
+ const diags = &base.comp.link_diags;
+ const input = try openObjectInput(diags, path);
+ errdefer input.object.file.close();
+ try loadInput(base, input);
+ }
+
+ /// Opens a path as a static library and parses it into the linker.
+ /// If `query` is non-null, allows GNU ld scripts.
+ fn openLoadArchive(base: *File, path: Path, opt_query: ?UnresolvedInput.Query) anyerror!void {
+ if (opt_query) |query| {
+ const archive = try openObject(path, query.must_link, query.hidden);
+ errdefer archive.file.close();
+ loadInput(base, .{ .archive = archive }) catch |err| switch (err) {
+ error.BadMagic, error.UnexpectedEndOfFile => {
+ if (base.tag != .elf) return err;
+ try loadGnuLdScript(base, path, query, archive.file);
+ archive.file.close();
+ return;
+ },
+ else => return err,
+ };
+ } else {
+ const archive = try openObject(path, false, false);
+ errdefer archive.file.close();
+ try loadInput(base, .{ .archive = archive });
+ }
+ }
+
+ /// Opens a path as a shared library and parses it into the linker.
+ /// Handles GNU ld scripts.
+ fn openLoadDso(base: *File, path: Path, query: UnresolvedInput.Query) anyerror!void {
+ const dso = try openDso(path, query.needed, query.weak, query.reexport);
+ errdefer dso.file.close();
+ loadInput(base, .{ .dso = dso }) catch |err| switch (err) {
+ error.BadMagic, error.UnexpectedEndOfFile => {
+ if (base.tag != .elf) return err;
+ try loadGnuLdScript(base, path, query, dso.file);
+ dso.file.close();
+ return;
+ },
+ else => return err,
+ };
+ }
+
+ fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: fs.File) anyerror!void {
+ const diags = &base.comp.link_diags;
+ const gpa = base.comp.gpa;
+ const stat = try file.stat();
+ const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
+ const buf = try gpa.alloc(u8, size);
+ defer gpa.free(buf);
+ const n = try file.preadAll(buf, 0);
+ if (buf.len != n) return error.UnexpectedEndOfFile;
+ var ld_script = try LdScript.parse(gpa, diags, path, buf);
+ defer ld_script.deinit(gpa);
+ for (ld_script.args) |arg| {
+ const query: UnresolvedInput.Query = .{
+ .needed = arg.needed or parent_query.needed,
+ .weak = parent_query.weak,
+ .reexport = parent_query.reexport,
+ .preferred_mode = parent_query.preferred_mode,
+ .search_strategy = parent_query.search_strategy,
+ .allow_so_scripts = parent_query.allow_so_scripts,
+ };
+ if (mem.startsWith(u8, arg.path, "-l")) {
+ @panic("TODO");
+ } else {
+ if (fs.path.isAbsolute(arg.path)) {
+ const new_path = Path.initCwd(try gpa.dupe(u8, arg.path));
+ switch (Compilation.classifyFileExt(arg.path)) {
+ .shared_library => try openLoadDso(base, new_path, query),
+ .object => try openLoadObject(base, new_path),
+ .static_library => try openLoadArchive(base, new_path, query),
+ else => diags.addParseError(path, "GNU ld script references file with unrecognized extension: {s}", .{arg.path}),
+ }
+ } else {
+ @panic("TODO");
+ }
+ }
+ }
+ }
+
+ pub fn loadInput(base: *File, input: Input) anyerror!void {
+ const use_lld = build_options.have_llvm and base.comp.config.use_lld;
+ if (use_lld) return;
+ switch (base.tag) {
+ inline .elf => |tag| {
+ dev.check(tag.devFeature());
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).loadInput(input);
+ },
+ else => {},
+ }
+ }
+
pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
dev.check(.lld_linker);
@@ -1010,7 +1100,6 @@ pub const File = struct {
defer tracy.end();
const comp = base.comp;
- const gpa = comp.gpa;
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
@@ -1042,7 +1131,7 @@ pub const File = struct {
var man: Cache.Manifest = undefined;
defer if (!base.disable_lld_caching) man.deinit();
- const objects = comp.objects;
+ const link_inputs = comp.link_inputs;
var digest: [Cache.hex_digest_len]u8 = undefined;
@@ -1052,11 +1141,8 @@ pub const File = struct {
// We are about to obtain this lock, so here we give other processes a chance first.
base.releaseLock();
- for (objects) |obj| {
- _ = try man.addFilePath(obj.path, null);
- man.hash.add(obj.must_link);
- man.hash.add(obj.loption);
- }
+ try hashInputs(&man, link_inputs);
+
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@@ -1092,26 +1178,24 @@ pub const File = struct {
};
}
- const win32_resource_table_len = comp.win32_resource_table.count();
- const num_object_files = objects.len + comp.c_object_table.count() + win32_resource_table_len + 2;
- var object_files = try std.ArrayList([*:0]const u8).initCapacity(gpa, num_object_files);
- defer object_files.deinit();
+ var object_files: std.ArrayListUnmanaged([*:0]const u8) = .empty;
- for (objects) |obj| {
- object_files.appendAssumeCapacity(try obj.path.toStringZ(arena));
+ try object_files.ensureUnusedCapacity(arena, link_inputs.len);
+ for (link_inputs) |input| {
+ object_files.appendAssumeCapacity(try input.path().?.toStringZ(arena));
}
+
+ try object_files.ensureUnusedCapacity(arena, comp.c_object_table.count() +
+ comp.win32_resource_table.count() + 2);
+
for (comp.c_object_table.keys()) |key| {
object_files.appendAssumeCapacity(try key.status.success.object_path.toStringZ(arena));
}
for (comp.win32_resource_table.keys()) |key| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, key.status.success.res_path));
}
- if (zcu_obj_path) |p| {
- object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
- }
- if (compiler_rt_path) |p| {
- object_files.appendAssumeCapacity(try p.toStringZ(arena));
- }
+ if (zcu_obj_path) |p| object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
+ if (compiler_rt_path) |p| object_files.appendAssumeCapacity(try p.toStringZ(arena));
if (comp.verbose_link) {
std.debug.print("ar rcs {s}", .{full_out_path_z});
@@ -1277,6 +1361,195 @@ pub const File = struct {
pub const Dwarf = @import("link/Dwarf.zig");
};
+/// Does all the tasks in the queue. Runs in exactly one separate thread
+/// from the rest of compilation. All tasks performed here are
+/// single-threaded with respect to one another.
+pub fn flushTaskQueue(tid: usize, comp: *Compilation) void {
+ // As soon as check() is called, another `flushTaskQueue` call could occur,
+ // so the safety lock must go after the check.
+ while (comp.link_task_queue.check()) |tasks| {
+ comp.link_task_queue_safety.lock();
+ defer comp.link_task_queue_safety.unlock();
+ for (tasks) |task| doTask(comp, tid, task);
+ }
+}
+
+pub const Task = union(enum) {
+ /// Loads the objects, shared objects, and archives that are already
+ /// known from the command line.
+ load_explicitly_provided,
+ /// Loads the shared objects and archives by resolving
+ /// `target_util.libcFullLinkFlags()` against the host libc
+ /// installation.
+ load_host_libc,
+ /// Tells the linker to load an object file by path.
+ load_object: Path,
+ /// Tells the linker to load a static library by path.
+ load_archive: Path,
+ /// Tells the linker to load a shared library, possibly one that is a
+ /// GNU ld script.
+ load_dso: Path,
+ /// Tells the linker to load an input which could be an object file,
+ /// archive, or shared library.
+ load_input: Input,
+
+ /// Write the constant value for a Decl to the output file.
+ codegen_nav: InternPool.Nav.Index,
+ /// Write the machine code for a function to the output file.
+ codegen_func: CodegenFunc,
+ codegen_type: InternPool.Index,
+
+ pub const CodegenFunc = struct {
+ /// This will either be a non-generic `func_decl` or a `func_instance`.
+ func: InternPool.Index,
+ /// This `Air` is owned by the `Job` and allocated with `gpa`.
+ /// It must be deinited when the job is processed.
+ air: Air,
+ };
+};
+
+pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
+ const diags = &comp.link_diags;
+ switch (task) {
+ .load_explicitly_provided => if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", comp.link_inputs.len);
+ defer prog_node.end();
+ for (comp.link_inputs) |input| {
+ base.loadInput(input) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via diags
+ else => |e| switch (input) {
+ .dso => |dso| diags.addParseError(dso.path, "failed to parse shared library: {s}", .{@errorName(e)}),
+ .object => |obj| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
+ .archive => |obj| diags.addParseError(obj.path, "failed to parse archive: {s}", .{@errorName(e)}),
+ .res => |res| diags.addParseError(res.path, "failed to parse Windows resource: {s}", .{@errorName(e)}),
+ .dso_exact => diags.addError("failed to handle dso_exact: {s}", .{@errorName(e)}),
+ },
+ };
+ prog_node.completeOne();
+ }
+ },
+ .load_host_libc => if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Linker Parse Host libc", 0);
+ defer prog_node.end();
+
+ const target = comp.root_mod.resolved_target.result;
+ const flags = target_util.libcFullLinkFlags(target);
+ const crt_dir = comp.libc_installation.?.crt_dir.?;
+ const sep = std.fs.path.sep_str;
+ for (flags) |flag| {
+ assert(mem.startsWith(u8, flag, "-l"));
+ const lib_name = flag["-l".len..];
+ switch (comp.config.link_mode) {
+ .dynamic => {
+ const dso_path = Path.initCwd(
+ std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
+ crt_dir, target.libPrefix(), lib_name, target.dynamicLibSuffix(),
+ }) catch return diags.setAllocFailure(),
+ );
+ base.openLoadDso(dso_path, .{
+ .preferred_mode = .dynamic,
+ .search_strategy = .paths_first,
+ }) catch |err| switch (err) {
+ error.FileNotFound => {
+ // Also try static.
+ const archive_path = Path.initCwd(
+ std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
+ crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
+ }) catch return diags.setAllocFailure(),
+ );
+ base.openLoadArchive(archive_path, .{
+ .preferred_mode = .dynamic,
+ .search_strategy = .paths_first,
+ }) catch |archive_err| switch (archive_err) {
+ error.LinkFailure => return, // error reported via diags
+ else => |e| diags.addParseError(dso_path, "failed to parse archive {}: {s}", .{ archive_path, @errorName(e) }),
+ };
+ },
+ error.LinkFailure => return, // error reported via diags
+ else => |e| diags.addParseError(dso_path, "failed to parse shared library: {s}", .{@errorName(e)}),
+ };
+ },
+ .static => {
+ const path = Path.initCwd(
+ std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
+ crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
+ }) catch return diags.setAllocFailure(),
+ );
+ // glibc sometimes makes even archive files GNU ld scripts.
+ base.openLoadArchive(path, .{
+ .preferred_mode = .static,
+ .search_strategy = .no_fallback,
+ }) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via diags
+ else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
+ };
+ },
+ }
+ }
+ },
+ .load_object => |path| if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Linker Parse Object", 0);
+ defer prog_node.end();
+ base.openLoadObject(path) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via diags
+ else => |e| diags.addParseError(path, "failed to parse object: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_archive => |path| if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Linker Parse Archive", 0);
+ defer prog_node.end();
+ base.openLoadArchive(path, null) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_dso => |path| if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Linker Parse Shared Library", 0);
+ defer prog_node.end();
+ base.openLoadDso(path, .{
+ .preferred_mode = .dynamic,
+ .search_strategy = .paths_first,
+ }) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| diags.addParseError(path, "failed to parse shared library: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_input => |input| if (comp.bin_file) |base| {
+ const prog_node = comp.work_queue_progress_node.start("Linker Parse Input", 0);
+ defer prog_node.end();
+ base.loadInput(input) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| {
+ if (input.path()) |path| {
+ diags.addParseError(path, "failed to parse linker input: {s}", .{@errorName(e)});
+ } else {
+ diags.addError("failed to {s}: {s}", .{ input.taskName(), @errorName(e) });
+ }
+ },
+ };
+ },
+ .codegen_nav => |nav_index| {
+ const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
+ pt.linkerUpdateNav(nav_index) catch |err| switch (err) {
+ error.OutOfMemory => diags.setAllocFailure(),
+ };
+ },
+ .codegen_func => |func| {
+ const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
+ // This call takes ownership of `func.air`.
+ pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) {
+ error.OutOfMemory => diags.setAllocFailure(),
+ };
+ },
+ .codegen_type => |ty| {
+ const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
+ pt.linkerUpdateContainerType(ty) catch |err| switch (err) {
+ error.OutOfMemory => diags.setAllocFailure(),
+ };
+ },
+ }
+}
+
pub fn spawnLld(
comp: *Compilation,
arena: Allocator,
@@ -1387,3 +1660,686 @@ pub fn spawnLld(
if (stderr.len > 0) log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
+
+/// Provided by the CLI, processed into `LinkInput` instances at the start of
+/// the compilation pipeline.
+pub const UnresolvedInput = union(enum) {
+ /// A library name that could potentially be dynamic or static depending on
+ /// query parameters, resolved according to library directories.
+ /// This could potentially resolve to a GNU ld script, resulting in more
+ /// library dependencies.
+ name_query: NameQuery,
+ /// When a file path is provided, query info is still needed because the
+ /// path may point to a .so file which may actually be a GNU ld script that
+ /// references library names which need to be resolved.
+ path_query: PathQuery,
+ /// Strings that come from GNU ld scripts. Is it a filename? Is it a path?
+ /// Who knows! Fuck around and find out.
+ ambiguous_name: NameQuery,
+ /// Put exactly this string in the dynamic section, no rpath.
+ dso_exact: Input.DsoExact,
+
+ pub const NameQuery = struct {
+ name: []const u8,
+ query: Query,
+ };
+
+ pub const PathQuery = struct {
+ path: Path,
+ query: Query,
+ };
+
+ pub const Query = struct {
+ needed: bool = false,
+ weak: bool = false,
+ reexport: bool = false,
+ must_link: bool = false,
+ hidden: bool = false,
+ allow_so_scripts: bool = false,
+ preferred_mode: std.builtin.LinkMode,
+ search_strategy: SearchStrategy,
+
+ fn fallbackMode(q: Query) std.builtin.LinkMode {
+ assert(q.search_strategy != .no_fallback);
+ return switch (q.preferred_mode) {
+ .dynamic => .static,
+ .static => .dynamic,
+ };
+ }
+ };
+
+ pub const SearchStrategy = enum {
+ paths_first,
+ mode_first,
+ no_fallback,
+ };
+};
+
+pub const Input = union(enum) {
+ object: Object,
+ archive: Object,
+ res: Res,
+ /// May not be a GNU ld script. Those are resolved when converting from
+ /// `UnresolvedInput` to `Input` values.
+ dso: Dso,
+ dso_exact: DsoExact,
+
+ pub const Object = struct {
+ path: Path,
+ file: fs.File,
+ must_link: bool,
+ hidden: bool,
+ };
+
+ pub const Res = struct {
+ path: Path,
+ file: fs.File,
+ };
+
+ pub const Dso = struct {
+ path: Path,
+ file: fs.File,
+ needed: bool,
+ weak: bool,
+ reexport: bool,
+ };
+
+ pub const DsoExact = struct {
+ /// Includes the ":" prefix. This is intended to be put into the DSO
+ /// section verbatim with no corresponding rpaths.
+ name: []const u8,
+ };
+
+ /// Returns `null` in the case of `dso_exact`.
+ pub fn path(input: Input) ?Path {
+ return switch (input) {
+ .object, .archive => |obj| obj.path,
+ inline .res, .dso => |x| x.path,
+ .dso_exact => null,
+ };
+ }
+
+ /// Returns `null` in the case of `dso_exact`.
+ pub fn pathAndFile(input: Input) ?struct { Path, fs.File } {
+ return switch (input) {
+ .object, .archive => |obj| .{ obj.path, obj.file },
+ inline .res, .dso => |x| .{ x.path, x.file },
+ .dso_exact => null,
+ };
+ }
+
+ pub fn taskName(input: Input) []const u8 {
+ return switch (input) {
+ .object, .archive => |obj| obj.path.basename(),
+ inline .res, .dso => |x| x.path.basename(),
+ .dso_exact => "dso_exact",
+ };
+ }
+};
+
+pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
+ for (link_inputs) |link_input| {
+ man.hash.add(@as(@typeInfo(Input).@"union".tag_type.?, link_input));
+ switch (link_input) {
+ .object, .archive => |obj| {
+ _ = try man.addOpenedFile(obj.path, obj.file, null);
+ man.hash.add(obj.must_link);
+ man.hash.add(obj.hidden);
+ },
+ .res => |res| {
+ _ = try man.addOpenedFile(res.path, res.file, null);
+ },
+ .dso => |dso| {
+ _ = try man.addOpenedFile(dso.path, dso.file, null);
+ man.hash.add(dso.needed);
+ man.hash.add(dso.weak);
+ man.hash.add(dso.reexport);
+ },
+ .dso_exact => |dso_exact| {
+ man.hash.addBytes(dso_exact.name);
+ },
+ }
+ }
+}
+
+pub fn resolveInputs(
+ gpa: Allocator,
+ arena: Allocator,
+ target: std.Target,
+ /// This function mutates this array but does not take ownership.
+ /// Allocated with `gpa`.
+ unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
+ /// Allocated with `gpa`.
+ resolved_inputs: *std.ArrayListUnmanaged(Input),
+ lib_directories: []const Cache.Directory,
+ color: std.zig.Color,
+) Allocator.Error!void {
+ var checked_paths: std.ArrayListUnmanaged(u8) = .empty;
+ defer checked_paths.deinit(gpa);
+
+ var ld_script_bytes: std.ArrayListUnmanaged(u8) = .empty;
+ defer ld_script_bytes.deinit(gpa);
+
+ var failed_libs: std.ArrayListUnmanaged(struct {
+ name: []const u8,
+ strategy: UnresolvedInput.SearchStrategy,
+ checked_paths: []const u8,
+ preferred_mode: std.builtin.LinkMode,
+ }) = .empty;
+
+ // Convert external system libs into a stack so that items can be
+ // pushed to it.
+ //
+ // This is necessary because shared objects might turn out to be
+ // "linker scripts" that in fact resolve to one or more other
+ // external system libs, including parameters such as "needed".
+ //
+ // Unfortunately, such files need to be detected immediately, so
+ // that this library search logic can be applied to them.
+ mem.reverse(UnresolvedInput, unresolved_inputs.items);
+
+ syslib: while (unresolved_inputs.popOrNull()) |unresolved_input| {
+ const name_query: UnresolvedInput.NameQuery = switch (unresolved_input) {
+ .name_query => |nq| nq,
+ .ambiguous_name => |an| an: {
+ const lib_name, const link_mode = stripLibPrefixAndSuffix(an.name, target) orelse {
+ try resolvePathInput(gpa, arena, unresolved_inputs, resolved_inputs, &ld_script_bytes, target, .{
+ .path = Path.initCwd(an.name),
+ .query = an.query,
+ }, color);
+ continue;
+ };
+ break :an .{
+ .name = lib_name,
+ .query = .{
+ .needed = an.query.needed,
+ .weak = an.query.weak,
+ .reexport = an.query.reexport,
+ .must_link = an.query.must_link,
+ .hidden = an.query.hidden,
+ .allow_so_scripts = an.query.allow_so_scripts,
+ .preferred_mode = link_mode,
+ .search_strategy = .no_fallback,
+ },
+ };
+ },
+ .path_query => |pq| {
+ try resolvePathInput(gpa, arena, unresolved_inputs, resolved_inputs, &ld_script_bytes, target, pq, color);
+ continue;
+ },
+ .dso_exact => |dso_exact| {
+ try resolved_inputs.append(gpa, .{ .dso_exact = dso_exact });
+ continue;
+ },
+ };
+ const query = name_query.query;
+
+ // Checked in the first pass above while looking for libc libraries.
+ assert(!fs.path.isAbsolute(name_query.name));
+
+ checked_paths.clearRetainingCapacity();
+
+ switch (query.search_strategy) {
+ .mode_first, .no_fallback => {
+ // check for preferred mode
+ for (lib_directories) |lib_directory| switch (try resolveLibInput(
+ gpa,
+ arena,
+ unresolved_inputs,
+ resolved_inputs,
+ &checked_paths,
+ &ld_script_bytes,
+ lib_directory,
+ name_query,
+ target,
+ query.preferred_mode,
+ color,
+ )) {
+ .ok => continue :syslib,
+ .no_match => {},
+ };
+ // check for fallback mode
+ if (query.search_strategy == .no_fallback) {
+ try failed_libs.append(arena, .{
+ .name = name_query.name,
+ .strategy = query.search_strategy,
+ .checked_paths = try arena.dupe(u8, checked_paths.items),
+ .preferred_mode = query.preferred_mode,
+ });
+ continue :syslib;
+ }
+ for (lib_directories) |lib_directory| switch (try resolveLibInput(
+ gpa,
+ arena,
+ unresolved_inputs,
+ resolved_inputs,
+ &checked_paths,
+ &ld_script_bytes,
+ lib_directory,
+ name_query,
+ target,
+ query.fallbackMode(),
+ color,
+ )) {
+ .ok => continue :syslib,
+ .no_match => {},
+ };
+ try failed_libs.append(arena, .{
+ .name = name_query.name,
+ .strategy = query.search_strategy,
+ .checked_paths = try arena.dupe(u8, checked_paths.items),
+ .preferred_mode = query.preferred_mode,
+ });
+ continue :syslib;
+ },
+ .paths_first => {
+ for (lib_directories) |lib_directory| {
+ // check for preferred mode
+ switch (try resolveLibInput(
+ gpa,
+ arena,
+ unresolved_inputs,
+ resolved_inputs,
+ &checked_paths,
+ &ld_script_bytes,
+ lib_directory,
+ name_query,
+ target,
+ query.preferred_mode,
+ color,
+ )) {
+ .ok => continue :syslib,
+ .no_match => {},
+ }
+
+ // check for fallback mode
+ switch (try resolveLibInput(
+ gpa,
+ arena,
+ unresolved_inputs,
+ resolved_inputs,
+ &checked_paths,
+ &ld_script_bytes,
+ lib_directory,
+ name_query,
+ target,
+ query.fallbackMode(),
+ color,
+ )) {
+ .ok => continue :syslib,
+ .no_match => {},
+ }
+ }
+ try failed_libs.append(arena, .{
+ .name = name_query.name,
+ .strategy = query.search_strategy,
+ .checked_paths = try arena.dupe(u8, checked_paths.items),
+ .preferred_mode = query.preferred_mode,
+ });
+ continue :syslib;
+ },
+ }
+ @compileError("unreachable");
+ }
+
+ if (failed_libs.items.len > 0) {
+ for (failed_libs.items) |f| {
+ const searched_paths = if (f.checked_paths.len == 0) " none" else f.checked_paths;
+ std.log.err("unable to find {s} system library '{s}' using strategy '{s}'. searched paths:{s}", .{
+ @tagName(f.preferred_mode), f.name, @tagName(f.strategy), searched_paths,
+ });
+ }
+ std.process.exit(1);
+ }
+}
+
+const ResolveLibInputResult = enum { ok, no_match };
+const fatal = std.process.fatal;
+
+fn resolveLibInput(
+ gpa: Allocator,
+ arena: Allocator,
+ /// Allocated via `gpa`.
+ unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
+ /// Allocated via `gpa`.
+ resolved_inputs: *std.ArrayListUnmanaged(Input),
+ /// Allocated via `gpa`.
+ checked_paths: *std.ArrayListUnmanaged(u8),
+ /// Allocated via `gpa`.
+ ld_script_bytes: *std.ArrayListUnmanaged(u8),
+ lib_directory: Directory,
+ name_query: UnresolvedInput.NameQuery,
+ target: std.Target,
+ link_mode: std.builtin.LinkMode,
+ color: std.zig.Color,
+) Allocator.Error!ResolveLibInputResult {
+ try resolved_inputs.ensureUnusedCapacity(gpa, 1);
+
+ const lib_name = name_query.name;
+
+ if (target.isDarwin() and link_mode == .dynamic) tbd: {
+ // Prefer .tbd over .dylib.
+ const test_path: Path = .{
+ .root_dir = lib_directory,
+ .sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
+ };
+ try checked_paths.writer(gpa).print("\n {}", .{test_path});
+ var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :tbd,
+ else => |e| fatal("unable to search for tbd library '{}': {s}", .{ test_path, @errorName(e) }),
+ };
+ errdefer file.close();
+ return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
+ }
+
+ {
+ const test_path: Path = .{
+ .root_dir = lib_directory,
+ .sub_path = try std.fmt.allocPrint(arena, "{s}{s}{s}", .{
+ target.libPrefix(), lib_name, switch (link_mode) {
+ .static => target.staticLibSuffix(),
+ .dynamic => target.dynamicLibSuffix(),
+ },
+ }),
+ };
+ try checked_paths.writer(gpa).print("\n {}", .{test_path});
+ switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
+ .path = test_path,
+ .query = name_query.query,
+ }, link_mode, color)) {
+ .no_match => {},
+ .ok => return .ok,
+ }
+ }
+
+ // In the case of Darwin, the main check will be .dylib, so here we
+ // additionally check for .so files.
+ if (target.isDarwin() and link_mode == .dynamic) so: {
+ const test_path: Path = .{
+ .root_dir = lib_directory,
+ .sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
+ };
+ try checked_paths.writer(gpa).print("\n {}", .{test_path});
+ var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :so,
+ else => |e| fatal("unable to search for so library '{}': {s}", .{
+ test_path, @errorName(e),
+ }),
+ };
+ errdefer file.close();
+ return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
+ }
+
+ // In the case of MinGW, the main check will be .lib but we also need to
+ // look for `libfoo.a`.
+ if (target.isMinGW() and link_mode == .static) mingw: {
+ const test_path: Path = .{
+ .root_dir = lib_directory,
+ .sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
+ };
+ try checked_paths.writer(gpa).print("\n {}", .{test_path});
+ var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => break :mingw,
+ else => |e| fatal("unable to search for static library '{}': {s}", .{ test_path, @errorName(e) }),
+ };
+ errdefer file.close();
+ return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
+ }
+
+ return .no_match;
+}
+
+fn finishResolveLibInput(
+ resolved_inputs: *std.ArrayListUnmanaged(Input),
+ path: Path,
+ file: std.fs.File,
+ link_mode: std.builtin.LinkMode,
+ query: UnresolvedInput.Query,
+) ResolveLibInputResult {
+ switch (link_mode) {
+ .static => resolved_inputs.appendAssumeCapacity(.{ .archive = .{
+ .path = path,
+ .file = file,
+ .must_link = query.must_link,
+ .hidden = query.hidden,
+ } }),
+ .dynamic => resolved_inputs.appendAssumeCapacity(.{ .dso = .{
+ .path = path,
+ .file = file,
+ .needed = query.needed,
+ .weak = query.weak,
+ .reexport = query.reexport,
+ } }),
+ }
+ return .ok;
+}
+
+fn resolvePathInput(
+ gpa: Allocator,
+ arena: Allocator,
+ /// Allocated with `gpa`.
+ unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
+ /// Allocated with `gpa`.
+ resolved_inputs: *std.ArrayListUnmanaged(Input),
+ /// Allocated via `gpa`.
+ ld_script_bytes: *std.ArrayListUnmanaged(u8),
+ target: std.Target,
+ pq: UnresolvedInput.PathQuery,
+ color: std.zig.Color,
+) Allocator.Error!void {
+ switch (switch (Compilation.classifyFileExt(pq.path.sub_path)) {
+ .static_library => try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
+ .shared_library => try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
+ .object => {
+ var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ fatal("failed to open object {}: {s}", .{ pq.path, @errorName(err) });
+ errdefer file.close();
+ try resolved_inputs.append(gpa, .{ .object = .{
+ .path = pq.path,
+ .file = file,
+ .must_link = pq.query.must_link,
+ .hidden = pq.query.hidden,
+ } });
+ return;
+ },
+ .res => {
+ var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ fatal("failed to open windows resource {}: {s}", .{ pq.path, @errorName(err) });
+ errdefer file.close();
+ try resolved_inputs.append(gpa, .{ .res = .{
+ .path = pq.path,
+ .file = file,
+ } });
+ return;
+ },
+ else => fatal("{}: unrecognized file extension", .{pq.path}),
+ }) {
+ .ok => {},
+ .no_match => fatal("{}: file not found", .{pq.path}),
+ }
+}
+
+fn resolvePathInputLib(
+ gpa: Allocator,
+ arena: Allocator,
+ /// Allocated with `gpa`.
+ unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
+ /// Allocated with `gpa`.
+ resolved_inputs: *std.ArrayListUnmanaged(Input),
+ /// Allocated via `gpa`.
+ ld_script_bytes: *std.ArrayListUnmanaged(u8),
+ target: std.Target,
+ pq: UnresolvedInput.PathQuery,
+ link_mode: std.builtin.LinkMode,
+ color: std.zig.Color,
+) Allocator.Error!ResolveLibInputResult {
+ try resolved_inputs.ensureUnusedCapacity(gpa, 1);
+
+ const test_path: Path = pq.path;
+ // In the case of .so files, they might actually be "linker scripts"
+ // that contain references to other libraries.
+ if (pq.query.allow_so_scripts and target.ofmt == .elf and mem.endsWith(u8, test_path.sub_path, ".so")) {
+ var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => return .no_match,
+ else => |e| fatal("unable to search for {s} library '{'}': {s}", .{
+ @tagName(link_mode), test_path, @errorName(e),
+ }),
+ };
+ errdefer file.close();
+ try ld_script_bytes.resize(gpa, @sizeOf(std.elf.Elf64_Ehdr));
+ const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{'}': {s}", .{
+ test_path, @errorName(err),
+ });
+ elf_file: {
+ if (n != ld_script_bytes.items.len) break :elf_file;
+ if (!mem.eql(u8, ld_script_bytes.items[0..4], "\x7fELF")) break :elf_file;
+ // Appears to be an ELF file.
+ return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
+ }
+ const stat = file.stat() catch |err|
+ fatal("failed to stat {}: {s}", .{ test_path, @errorName(err) });
+ const size = std.math.cast(u32, stat.size) orelse
+ fatal("{}: linker script too big", .{test_path});
+ try ld_script_bytes.resize(gpa, size);
+ const buf = ld_script_bytes.items[n..];
+ const n2 = file.preadAll(buf, n) catch |err|
+ fatal("failed to read {}: {s}", .{ test_path, @errorName(err) });
+ if (n2 != buf.len) fatal("failed to read {}: unexpected end of file", .{test_path});
+ var diags = Diags.init(gpa);
+ defer diags.deinit();
+ const ld_script_result = LdScript.parse(gpa, &diags, test_path, ld_script_bytes.items);
+ if (diags.hasErrors()) {
+ var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+ try wip_errors.init(gpa);
+ defer wip_errors.deinit();
+
+ try diags.addMessagesToBundle(&wip_errors);
+
+ var error_bundle = try wip_errors.toOwnedBundle("");
+ defer error_bundle.deinit(gpa);
+
+ error_bundle.renderToStdErr(color.renderOptions());
+
+ std.process.exit(1);
+ }
+
+ var ld_script = ld_script_result catch |err|
+ fatal("{}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
+ defer ld_script.deinit(gpa);
+
+ try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
+ for (ld_script.args) |arg| {
+ const query: UnresolvedInput.Query = .{
+ .needed = arg.needed or pq.query.needed,
+ .weak = pq.query.weak,
+ .reexport = pq.query.reexport,
+ .preferred_mode = pq.query.preferred_mode,
+ .search_strategy = pq.query.search_strategy,
+ .allow_so_scripts = pq.query.allow_so_scripts,
+ };
+ if (mem.startsWith(u8, arg.path, "-l")) {
+ unresolved_inputs.appendAssumeCapacity(.{ .name_query = .{
+ .name = try arena.dupe(u8, arg.path["-l".len..]),
+ .query = query,
+ } });
+ } else {
+ unresolved_inputs.appendAssumeCapacity(.{ .ambiguous_name = .{
+ .name = try arena.dupe(u8, arg.path),
+ .query = query,
+ } });
+ }
+ }
+ file.close();
+ return .ok;
+ }
+
+ var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => return .no_match,
+ else => |e| fatal("unable to search for {s} library {}: {s}", .{
+ @tagName(link_mode), test_path, @errorName(e),
+ }),
+ };
+ errdefer file.close();
+ return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
+}
+
+pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
+ var file = try path.root_dir.handle.openFile(path.sub_path, .{});
+ errdefer file.close();
+ return .{
+ .path = path,
+ .file = file,
+ .must_link = must_link,
+ .hidden = hidden,
+ };
+}
+
+pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
+ var file = try path.root_dir.handle.openFile(path.sub_path, .{});
+ errdefer file.close();
+ return .{
+ .path = path,
+ .file = file,
+ .needed = needed,
+ .weak = weak,
+ .reexport = reexport,
+ };
+}
+
+pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
+ return .{ .object = openObject(path, false, false) catch |err| {
+ return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
+ } };
+}
+
+pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
+ return .{ .archive = openObject(path, must_link, hidden) catch |err| {
+ return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
+ } };
+}
+
+pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
+ return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
+ return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
+ } };
+}
+
+fn stripLibPrefixAndSuffix(path: []const u8, target: std.Target) ?struct { []const u8, std.builtin.LinkMode } {
+ const prefix = target.libPrefix();
+ const static_suffix = target.staticLibSuffix();
+ const dynamic_suffix = target.dynamicLibSuffix();
+ const basename = fs.path.basename(path);
+ const unlibbed = if (mem.startsWith(u8, basename, prefix)) basename[prefix.len..] else return null;
+ if (mem.endsWith(u8, unlibbed, static_suffix)) return .{
+ unlibbed[0 .. unlibbed.len - static_suffix.len], .static,
+ };
+ if (mem.endsWith(u8, unlibbed, dynamic_suffix)) return .{
+ unlibbed[0 .. unlibbed.len - dynamic_suffix.len], .dynamic,
+ };
+ return null;
+}
+
+/// Returns true if and only if there is at least one input of type object,
+/// archive, or Windows resource file.
+pub fn anyObjectInputs(inputs: []const Input) bool {
+ return countObjectInputs(inputs) != 0;
+}
+
+/// Returns the number of inputs of type object, archive, or Windows resource file.
+pub fn countObjectInputs(inputs: []const Input) usize {
+ var count: usize = 0;
+ for (inputs) |input| switch (input) {
+ .dso, .dso_exact => continue,
+ .res, .object, .archive => count += 1,
+ };
+ return count;
+}
+
+/// Returns the first input of type object or archive.
+pub fn firstObjectInput(inputs: []const Input) ?Input.Object {
+ for (inputs) |input| switch (input) {
+ .object, .archive => |obj| return obj,
+ .res, .dso, .dso_exact => continue,
+ };
+ return null;
+}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 7d01e64d78..aa75432bc1 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -16,7 +16,7 @@ dynamicbase: bool,
/// default or populated together. They should not be separate fields.
major_subsystem_version: u16,
minor_subsystem_version: u16,
-lib_dirs: []const []const u8,
+lib_directories: []const Directory,
entry: link.File.OpenOptions.Entry,
entry_addr: ?u32,
module_definition_file: ?[]const u8,
@@ -297,7 +297,7 @@ pub fn createEmpty(
.dynamicbase = options.dynamicbase,
.major_subsystem_version = options.major_subsystem_version orelse 6,
.minor_subsystem_version = options.minor_subsystem_version orelse 0,
- .lib_dirs = options.lib_dirs,
+ .lib_directories = options.lib_directories,
.entry_addr = math.cast(u32, options.entry_addr orelse 0) orelse
return error.EntryAddressTooBig,
.module_definition_file = options.module_definition_file,
@@ -2727,6 +2727,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
+const Directory = std.Build.Cache.Directory;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig
index cf09174e88..de9ec66177 100644
--- a/src/link/Coff/lld.zig
+++ b/src/link/Coff/lld.zig
@@ -8,6 +8,7 @@ const log = std.log.scoped(.link);
const mem = std.mem;
const Cache = std.Build.Cache;
const Path = std.Build.Cache.Path;
+const Directory = std.Build.Cache.Directory;
const mingw = @import("../../mingw.zig");
const link = @import("../../link.zig");
@@ -74,10 +75,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
comptime assert(Compilation.link_hash_implementation_version == 14);
- for (comp.objects) |obj| {
- _ = try man.addFilePath(obj.path, null);
- man.hash.add(obj.must_link);
- }
+ try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@@ -88,7 +86,10 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
man.hash.addOptionalBytes(entry_name);
man.hash.add(self.base.stack_size);
man.hash.add(self.image_base);
- man.hash.addListOfBytes(self.lib_dirs);
+ {
+ // TODO remove this, libraries must instead be resolved by the frontend.
+ for (self.lib_directories) |lib_directory| man.hash.addOptionalBytes(lib_directory.path);
+ }
man.hash.add(comp.skip_linker_dependencies);
if (comp.config.link_libc) {
man.hash.add(comp.libc_installation != null);
@@ -100,7 +101,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
}
- try link.hashAddSystemLibs(&man, comp.system_libs);
+ man.hash.addListOfBytes(comp.windows_libs.keys());
man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
man.hash.addOptional(self.subsystem);
man.hash.add(comp.config.is_test);
@@ -148,8 +149,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
- if (comp.objects.len != 0)
- break :blk comp.objects[0].path;
+ if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
@@ -266,18 +266,24 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
- for (self.lib_dirs) |lib_dir| {
- try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_dir}));
+ for (self.lib_directories) |lib_directory| {
+ try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_directory.path orelse "."}));
}
- try argv.ensureUnusedCapacity(comp.objects.len);
- for (comp.objects) |obj| {
- if (obj.must_link) {
- argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Path, obj.path)}));
- } else {
- argv.appendAssumeCapacity(try obj.path.toString(arena));
- }
- }
+ try argv.ensureUnusedCapacity(comp.link_inputs.len);
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .dso_exact => unreachable, // not applicable to PE/COFF
+ inline .dso, .res => |x| {
+ argv.appendAssumeCapacity(try x.path.toString(arena));
+ },
+ .object, .archive => |obj| {
+ if (obj.must_link) {
+ argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Path, obj.path)}));
+ } else {
+ argv.appendAssumeCapacity(try obj.path.toString(arena));
+ }
+ },
+ };
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@@ -484,20 +490,20 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (comp.compiler_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
}
- try argv.ensureUnusedCapacity(comp.system_libs.count());
- for (comp.system_libs.keys()) |key| {
+ try argv.ensureUnusedCapacity(comp.windows_libs.count());
+ for (comp.windows_libs.keys()) |key| {
const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
if (comp.crt_files.get(lib_basename)) |crt_file| {
argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena));
continue;
}
- if (try findLib(arena, lib_basename, self.lib_dirs)) |full_path| {
+ if (try findLib(arena, lib_basename, self.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
if (target.abi.isGnu()) {
const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
- if (try findLib(arena, fallback_name, self.lib_dirs)) |full_path| {
+ if (try findLib(arena, fallback_name, self.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
@@ -530,14 +536,13 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
-fn findLib(arena: Allocator, name: []const u8, lib_dirs: []const []const u8) !?[]const u8 {
- for (lib_dirs) |lib_dir| {
- const full_path = try fs.path.join(arena, &.{ lib_dir, name });
- fs.cwd().access(full_path, .{}) catch |err| switch (err) {
+fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Directory) !?[]const u8 {
+ for (lib_directories) |lib_directory| {
+ lib_directory.handle.access(name, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
- return full_path;
+ return try lib_directory.join(arena, &.{name});
}
return null;
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index dfb4a614fa..d02d98800d 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,6 +1,7 @@
pub const Atom = @import("Elf/Atom.zig");
base: link.File,
+zig_object: ?*ZigObject,
rpath_table: std.StringArrayHashMapUnmanaged(void),
image_base: u64,
emit_relocs: bool,
@@ -15,7 +16,6 @@ z_relro: bool,
z_common_page_size: ?u64,
/// TODO make this non optional and resolve the default in open()
z_max_page_size: ?u64,
-lib_dirs: []const []const u8,
hash_style: HashStyle,
compress_debug_sections: CompressDebugSections,
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void),
@@ -36,8 +36,7 @@ ptr_width: PtrWidth,
llvm_object: ?LlvmObject.Ptr = null,
/// A list of all input files.
-/// Index of each input file also encodes the priority or precedence of one input file
-/// over another.
+/// First index is a special "null file". Order is otherwise not observed.
files: std.MultiArrayList(File.Entry) = .{},
/// Long-lived list of all file descriptors.
/// We store them globally rather than per actual File so that we can re-use
@@ -116,6 +115,10 @@ comment_merge_section_index: ?Merge.Section.Index = null,
first_eflags: ?elf.Word = null,
+/// `--verbose-link` output.
+/// Initialized on creation, appended to as inputs are added, printed during `flush`.
+dump_argv_list: std.ArrayListUnmanaged([]const u8),
+
const SectionIndexes = struct {
copy_rel: ?u32 = null,
dynamic: ?u32 = null,
@@ -297,6 +300,7 @@ pub fn createEmpty(
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
+ .zig_object = null,
.rpath_table = rpath_table,
.ptr_width = ptr_width,
.page_size = page_size,
@@ -328,7 +332,6 @@ pub fn createEmpty(
.z_relro = options.z_relro,
.z_common_page_size = options.z_common_page_size,
.z_max_page_size = options.z_max_page_size,
- .lib_dirs = options.lib_dirs,
.hash_style = options.hash_style,
.compress_debug_sections = options.compress_debug_sections,
.symbol_wrap_set = options.symbol_wrap_set,
@@ -341,6 +344,7 @@ pub fn createEmpty(
.enable_new_dtags = options.enable_new_dtags,
.print_icf_sections = options.print_icf_sections,
.print_map = options.print_map,
+ .dump_argv_list = .empty,
};
if (use_llvm and comp.config.have_zcu) {
self.llvm_object = try LlvmObject.create(arena, comp);
@@ -352,6 +356,9 @@ pub fn createEmpty(
return self;
}
+ // --verbose-link
+ if (comp.verbose_link) try dumpArgvInit(self, arena);
+
const is_obj = output_mode == .Obj;
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
@@ -418,14 +425,17 @@ pub fn createEmpty(
if (opt_zcu) |zcu| {
if (!use_llvm) {
const index: File.Index = @intCast(try self.files.addOne(gpa));
- self.files.set(index, .{ .zig_object = .{
+ self.files.set(index, .zig_object);
+ self.zig_object_index = index;
+ const zig_object = try arena.create(ZigObject);
+ self.zig_object = zig_object;
+ zig_object.* = .{
.index = index,
.basename = try std.fmt.allocPrint(arena, "{s}.o", .{
fs.path.stem(zcu.main_mod.root_src_path),
}),
- } });
- self.zig_object_index = index;
- try self.zigObjectPtr().?.init(self, .{
+ };
+ try zig_object.init(self, .{
.symbol_count_hint = options.symbol_count_hint,
.program_code_size_hint = options.program_code_size_hint,
});
@@ -457,12 +467,14 @@ pub fn deinit(self: *Elf) void {
self.file_handles.deinit(gpa);
for (self.files.items(.tags), self.files.items(.data)) |tag, *data| switch (tag) {
- .null => {},
- .zig_object => data.zig_object.deinit(gpa),
+ .null, .zig_object => {},
.linker_defined => data.linker_defined.deinit(gpa),
.object => data.object.deinit(gpa),
.shared_object => data.shared_object.deinit(gpa),
};
+ if (self.zig_object) |zig_object| {
+ zig_object.deinit(gpa);
+ }
self.files.deinit(gpa);
self.objects.deinit(gpa);
self.shared_objects.deinit(gpa);
@@ -501,6 +513,7 @@ pub fn deinit(self: *Elf) void {
self.rela_dyn.deinit(gpa);
self.rela_plt.deinit(gpa);
self.comdat_group_sections.deinit(gpa);
+ self.dump_argv_list.deinit(gpa);
}
pub fn getNavVAddr(self: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 {
@@ -752,6 +765,37 @@ pub fn allocateChunk(self: *Elf, args: struct {
return res;
}
+pub fn loadInput(self: *Elf, input: link.Input) !void {
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const diags = &comp.link_diags;
+ const target = self.getTarget();
+ const debug_fmt_strip = comp.config.debug_format == .strip;
+ const default_sym_version = self.default_sym_version;
+ const is_static_lib = self.base.isStaticLib();
+
+ if (comp.verbose_link) {
+ comp.mutex.lock(); // protect comp.arena
+ defer comp.mutex.unlock();
+
+ const argv = &self.dump_argv_list;
+ switch (input) {
+ .res => unreachable,
+ .dso_exact => |dso_exact| try argv.appendSlice(gpa, &.{ "-l", dso_exact.name }),
+ .object, .archive => |obj| try argv.append(gpa, try obj.path.toString(comp.arena)),
+ .dso => |dso| try argv.append(gpa, try dso.path.toString(comp.arena)),
+ }
+ }
+
+ switch (input) {
+ .res => unreachable,
+ .dso_exact => @panic("TODO"),
+ .object => |obj| try parseObject(self, obj),
+ .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, &self.first_eflags, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
+ .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target),
+ }
+}
+
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
@@ -774,11 +818,11 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
if (use_lld) return;
}
+ if (comp.verbose_link) Compilation.dump_argv(self.dump_argv_list.items);
+
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
- const target = self.getTarget();
- const link_mode = comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const module_obj_path: ?Path = if (self.base.zcu_object_sub_path) |path| .{
.root_dir = directory,
@@ -788,126 +832,19 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
path,
} else null;
- // --verbose-link
- if (comp.verbose_link) try self.dumpArgv(comp);
-
if (self.zigObjectPtr()) |zig_object| try zig_object.flush(self, tid);
- if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
- if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
-
- const csu = try comp.getCrtPaths(arena);
- // csu prelude
- if (csu.crt0) |path| parseObjectReportingFailure(self, path);
- if (csu.crti) |path| parseObjectReportingFailure(self, path);
- if (csu.crtbegin) |path| parseObjectReportingFailure(self, path);
-
- for (comp.objects) |obj| {
- parseInputReportingFailure(self, obj.path, obj.needed, obj.must_link);
- }
-
- // This is a set of object files emitted by clang in a single `build-exe` invocation.
- // For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
- // in this set.
- for (comp.c_object_table.keys()) |key| {
- parseObjectReportingFailure(self, key.status.success.object_path);
- }
-
- if (module_obj_path) |path| parseObjectReportingFailure(self, path);
-
- if (comp.config.any_sanitize_thread) parseCrtFileReportingFailure(self, comp.tsan_lib.?);
- if (comp.config.any_fuzz) parseCrtFileReportingFailure(self, comp.fuzzer_lib.?);
-
- // libc
- if (!comp.skip_linker_dependencies and !comp.config.link_libc) {
- if (comp.libc_static_lib) |lib| parseCrtFileReportingFailure(self, lib);
- }
+ if (module_obj_path) |path| openParseObjectReportingFailure(self, path);
- for (comp.system_libs.values()) |lib_info| {
- parseInputReportingFailure(self, lib_info.path.?, lib_info.needed, false);
- }
-
- // libc++ dep
- if (comp.config.link_libcpp) {
- parseInputReportingFailure(self, comp.libcxxabi_static_lib.?.full_object_path, false, false);
- parseInputReportingFailure(self, comp.libcxx_static_lib.?.full_object_path, false, false);
- }
-
- // libunwind dep
- if (comp.config.link_libunwind) {
- parseInputReportingFailure(self, comp.libunwind_static_lib.?.full_object_path, false, false);
- }
-
- // libc dep
- diags.flags.missing_libc = false;
- if (comp.config.link_libc) {
- if (comp.libc_installation) |lc| {
- const flags = target_util.libcFullLinkFlags(target);
-
- var test_path = std.ArrayList(u8).init(arena);
- var checked_paths = std.ArrayList([]const u8).init(arena);
-
- for (flags) |flag| {
- checked_paths.clearRetainingCapacity();
- const lib_name = flag["-l".len..];
-
- success: {
- if (!self.base.isStatic()) {
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .dynamic))
- break :success;
- }
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .static))
- break :success;
-
- diags.addMissingLibraryError(
- checked_paths.items,
- "missing system library: '{s}' was not found",
- .{lib_name},
- );
- continue;
- }
-
- const resolved_path = Path.initCwd(try arena.dupe(u8, test_path.items));
- parseInputReportingFailure(self, resolved_path, false, false);
- }
- } else if (target.isGnuLibC()) {
- for (glibc.libs) |lib| {
- if (lib.removed_in) |rem_in| {
- if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
- }
-
- const lib_path = Path.initCwd(try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
- comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
- }));
- parseInputReportingFailure(self, lib_path, false, false);
- }
- parseInputReportingFailure(self, try comp.get_libc_crt_file(arena, "libc_nonshared.a"), false, false);
- } else if (target.isMusl()) {
- const path = try comp.get_libc_crt_file(arena, switch (link_mode) {
- .static => "libc.a",
- .dynamic => "libc.so",
- });
- parseInputReportingFailure(self, path, false, false);
- } else {
- diags.flags.missing_libc = true;
- }
- }
-
- // Finally, as the last input objects we add compiler_rt and CSU postlude (if any).
-
- // compiler-rt. Since compiler_rt exports symbols like `memset`, it needs
- // to be after the shared libraries, so they are picked up from the shared
- // libraries, not libcompiler_rt.
- if (comp.compiler_rt_lib) |crt_file| {
- parseInputReportingFailure(self, crt_file.full_object_path, false, false);
- } else if (comp.compiler_rt_obj) |crt_file| {
- parseObjectReportingFailure(self, crt_file.full_object_path);
+ switch (comp.config.output_mode) {
+ .Obj => return relocatable.flushObject(self, comp),
+ .Lib => switch (comp.config.link_mode) {
+ .dynamic => {},
+ .static => return relocatable.flushStaticLib(self, comp),
+ },
+ .Exe => {},
}
- // csu postlude
- if (csu.crtend) |path| parseObjectReportingFailure(self, path);
- if (csu.crtn) |path| parseObjectReportingFailure(self, path);
-
if (diags.hasErrors()) return error.FlushFailure;
// If we haven't already, create a linker-generated input file comprising of
@@ -1058,347 +995,150 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
if (diags.hasErrors()) return error.FlushFailure;
}
-/// --verbose-link output
-fn dumpArgv(self: *Elf, comp: *Compilation) !void {
- const gpa = self.base.comp.gpa;
- var arena_allocator = std.heap.ArenaAllocator.init(gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
+fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
const target = self.getTarget();
- const link_mode = self.base.comp.config.link_mode;
- const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
- const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
- const module_obj_path: ?[]const u8 = if (self.base.zcu_object_sub_path) |path| blk: {
- if (fs.path.dirname(full_out_path)) |dirname| {
- break :blk try fs.path.join(arena, &.{ dirname, path });
- } else {
- break :blk path;
- }
- } else null;
-
- const csu = try comp.getCrtPaths(arena);
- const compiler_rt_path: ?[]const u8 = blk: {
- if (comp.compiler_rt_lib) |x| break :blk try x.full_object_path.toString(arena);
- if (comp.compiler_rt_obj) |x| break :blk try x.full_object_path.toString(arena);
- break :blk null;
- };
+ const full_out_path = try self.base.emit.root_dir.join(arena, &[_][]const u8{self.base.emit.sub_path});
- var argv = std.ArrayList([]const u8).init(arena);
+ const argv = &self.dump_argv_list;
- try argv.append("zig");
+ try argv.append(gpa, "zig");
if (self.base.isStaticLib()) {
- try argv.append("ar");
+ try argv.append(gpa, "ar");
} else {
- try argv.append("ld");
+ try argv.append(gpa, "ld");
}
if (self.base.isObject()) {
- try argv.append("-r");
+ try argv.append(gpa, "-r");
}
- try argv.append("-o");
- try argv.append(full_out_path);
+ try argv.append(gpa, "-o");
+ try argv.append(gpa, full_out_path);
- if (self.base.isRelocatable()) {
- for (comp.objects) |obj| {
- try argv.append(try obj.path.toString(arena));
- }
-
- for (comp.c_object_table.keys()) |key| {
- try argv.append(try key.status.success.object_path.toString(arena));
- }
-
- if (module_obj_path) |p| {
- try argv.append(p);
- }
- } else {
+ if (!self.base.isRelocatable()) {
if (!self.base.isStatic()) {
if (target.dynamic_linker.get()) |path| {
- try argv.append("-dynamic-linker");
- try argv.append(path);
+ try argv.appendSlice(gpa, &.{ "-dynamic-linker", try arena.dupe(u8, path) });
}
}
if (self.base.isDynLib()) {
if (self.soname) |name| {
- try argv.append("-soname");
- try argv.append(name);
+ try argv.append(gpa, "-soname");
+ try argv.append(gpa, name);
}
}
if (self.entry_name) |name| {
- try argv.appendSlice(&.{ "--entry", name });
+ try argv.appendSlice(gpa, &.{ "--entry", name });
}
for (self.rpath_table.keys()) |rpath| {
- try argv.appendSlice(&.{ "-rpath", rpath });
+ try argv.appendSlice(gpa, &.{ "-rpath", rpath });
}
- try argv.appendSlice(&.{
+ try argv.appendSlice(gpa, &.{
"-z",
try std.fmt.allocPrint(arena, "stack-size={d}", .{self.base.stack_size}),
});
- try argv.append(try std.fmt.allocPrint(arena, "--image-base={d}", .{self.image_base}));
+ try argv.append(gpa, try std.fmt.allocPrint(arena, "--image-base={d}", .{self.image_base}));
if (self.base.gc_sections) {
- try argv.append("--gc-sections");
+ try argv.append(gpa, "--gc-sections");
}
if (self.base.print_gc_sections) {
- try argv.append("--print-gc-sections");
+ try argv.append(gpa, "--print-gc-sections");
}
if (comp.link_eh_frame_hdr) {
- try argv.append("--eh-frame-hdr");
+ try argv.append(gpa, "--eh-frame-hdr");
}
if (comp.config.rdynamic) {
- try argv.append("--export-dynamic");
+ try argv.append(gpa, "--export-dynamic");
}
if (self.z_notext) {
- try argv.append("-z");
- try argv.append("notext");
+ try argv.append(gpa, "-z");
+ try argv.append(gpa, "notext");
}
if (self.z_nocopyreloc) {
- try argv.append("-z");
- try argv.append("nocopyreloc");
+ try argv.append(gpa, "-z");
+ try argv.append(gpa, "nocopyreloc");
}
if (self.z_now) {
- try argv.append("-z");
- try argv.append("now");
+ try argv.append(gpa, "-z");
+ try argv.append(gpa, "now");
}
if (self.base.isStatic()) {
- try argv.append("-static");
+ try argv.append(gpa, "-static");
} else if (self.isEffectivelyDynLib()) {
- try argv.append("-shared");
+ try argv.append(gpa, "-shared");
}
if (comp.config.pie and self.base.isExe()) {
- try argv.append("-pie");
+ try argv.append(gpa, "-pie");
}
if (comp.config.debug_format == .strip) {
- try argv.append("-s");
- }
-
- // csu prelude
- if (csu.crt0) |path| try argv.append(try path.toString(arena));
- if (csu.crti) |path| try argv.append(try path.toString(arena));
- if (csu.crtbegin) |path| try argv.append(try path.toString(arena));
-
- for (self.lib_dirs) |lib_dir| {
- try argv.append("-L");
- try argv.append(lib_dir);
- }
-
- if (comp.config.link_libc) {
- if (self.base.comp.libc_installation) |libc_installation| {
- try argv.append("-L");
- try argv.append(libc_installation.crt_dir.?);
- }
- }
-
- var whole_archive = false;
- for (comp.objects) |obj| {
- if (obj.must_link and !whole_archive) {
- try argv.append("-whole-archive");
- whole_archive = true;
- } else if (!obj.must_link and whole_archive) {
- try argv.append("-no-whole-archive");
- whole_archive = false;
- }
-
- if (obj.loption) {
- try argv.append("-l");
- }
- try argv.append(try obj.path.toString(arena));
- }
- if (whole_archive) {
- try argv.append("-no-whole-archive");
- whole_archive = false;
- }
-
- for (comp.c_object_table.keys()) |key| {
- try argv.append(try key.status.success.object_path.toString(arena));
- }
-
- if (module_obj_path) |p| {
- try argv.append(p);
- }
-
- if (comp.config.any_sanitize_thread) {
- try argv.append(try comp.tsan_lib.?.full_object_path.toString(arena));
+ try argv.append(gpa, "-s");
}
- if (comp.config.any_fuzz) {
- try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
- }
-
- // libc
- if (!comp.skip_linker_dependencies and !comp.config.link_libc) {
- if (comp.libc_static_lib) |lib| {
- try argv.append(try lib.full_object_path.toString(arena));
- }
- }
-
- // Shared libraries.
- // Worst-case, we need an --as-needed argument for every lib, as well
- // as one before and one after.
- try argv.ensureUnusedCapacity(self.base.comp.system_libs.keys().len * 2 + 2);
- argv.appendAssumeCapacity("--as-needed");
- var as_needed = true;
-
- for (self.base.comp.system_libs.values()) |lib_info| {
- const lib_as_needed = !lib_info.needed;
- switch ((@as(u2, @intFromBool(lib_as_needed)) << 1) | @intFromBool(as_needed)) {
- 0b00, 0b11 => {},
- 0b01 => {
- argv.appendAssumeCapacity("--no-as-needed");
- as_needed = false;
- },
- 0b10 => {
- argv.appendAssumeCapacity("--as-needed");
- as_needed = true;
- },
- }
- argv.appendAssumeCapacity(try lib_info.path.?.toString(arena));
- }
-
- if (!as_needed) {
- argv.appendAssumeCapacity("--as-needed");
- as_needed = true;
- }
-
- // libc++ dep
- if (comp.config.link_libcpp) {
- try argv.append(try comp.libcxxabi_static_lib.?.full_object_path.toString(arena));
- try argv.append(try comp.libcxx_static_lib.?.full_object_path.toString(arena));
- }
-
- // libunwind dep
- if (comp.config.link_libunwind) {
- try argv.append(try comp.libunwind_static_lib.?.full_object_path.toString(arena));
- }
-
- // libc dep
if (comp.config.link_libc) {
- if (self.base.comp.libc_installation != null) {
- const needs_grouping = link_mode == .static;
- if (needs_grouping) try argv.append("--start-group");
- try argv.appendSlice(target_util.libcFullLinkFlags(target));
- if (needs_grouping) try argv.append("--end-group");
- } else if (target.isGnuLibC()) {
- for (glibc.libs) |lib| {
- if (lib.removed_in) |rem_in| {
- if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
- }
-
- const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
- comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
- });
- try argv.append(lib_path);
- }
- try argv.append(try comp.crtFileAsString(arena, "libc_nonshared.a"));
- } else if (target.isMusl()) {
- try argv.append(try comp.crtFileAsString(arena, switch (link_mode) {
- .static => "libc.a",
- .dynamic => "libc.so",
- }));
+ if (self.base.comp.libc_installation) |lci| {
+ try argv.append(gpa, "-L");
+ try argv.append(gpa, lci.crt_dir.?);
}
}
-
- // compiler-rt
- if (compiler_rt_path) |p| {
- try argv.append(p);
- }
-
- // crt postlude
- if (csu.crtend) |path| try argv.append(try path.toString(arena));
- if (csu.crtn) |path| try argv.append(try path.toString(arena));
}
-
- Compilation.dump_argv(argv.items);
}
-pub const ParseError = error{
- /// Indicates the error is already reported on `Compilation.link_diags`.
- LinkFailure,
-
- OutOfMemory,
- Overflow,
- InputOutput,
- EndOfStream,
- FileSystem,
- NotSupported,
- InvalidCharacter,
- UnknownFileType,
-} || LdScript.Error || fs.Dir.AccessError || fs.File.SeekError || fs.File.OpenError || fs.File.ReadError;
-
-fn parseCrtFileReportingFailure(self: *Elf, crt_file: Compilation.CrtFile) void {
- parseInputReportingFailure(self, crt_file.full_object_path, false, false);
-}
-
-pub fn parseInputReportingFailure(self: *Elf, path: Path, needed: bool, must_link: bool) void {
- const gpa = self.base.comp.gpa;
+pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
const diags = &self.base.comp.link_diags;
- const target = self.getTarget();
-
- switch (Compilation.classifyFileExt(path.sub_path)) {
- .object => parseObjectReportingFailure(self, path),
- .shared_library => parseSharedObject(gpa, diags, .{
- .path = path,
- .needed = needed,
- }, &self.shared_objects, &self.files, target) catch |err| switch (err) {
- error.LinkFailure => return, // already reported
- error.BadMagic, error.UnexpectedEndOfFile => {
- // It could be a linker script.
- self.parseLdScript(.{ .path = path, .needed = needed }) catch |err2| switch (err2) {
- error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(path, "failed to parse linker script: {s}", .{@errorName(e)}),
- };
- },
- else => |e| diags.addParseError(path, "failed to parse shared object: {s}", .{@errorName(e)}),
- },
- .static_library => parseArchive(self, path, must_link) catch |err| switch (err) {
- error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
- },
- .unknown => self.parseLdScript(.{ .path = path, .needed = needed }) catch |err| switch (err) {
- error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(path, "failed to parse linker script: {s}", .{@errorName(e)}),
- },
- else => diags.addParseError(path, "unrecognized file type", .{}),
- }
+ const obj = link.openObject(path, false, false) catch |err| {
+ switch (diags.failParse(path, "failed to open object {}: {s}", .{ path, @errorName(err) })) {
+ error.LinkFailure => return,
+ }
+ };
+ self.parseObjectReportingFailure(obj);
}
-pub fn parseObjectReportingFailure(self: *Elf, path: Path) void {
+fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
const diags = &self.base.comp.link_diags;
- self.parseObject(path) catch |err| switch (err) {
+ self.parseObject(obj) catch |err| switch (err) {
error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(path, "unable to parse object: {s}", .{@errorName(e)}),
+ else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
};
}
-fn parseObject(self: *Elf, path: Path) ParseError!void {
+fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.comp.gpa;
- const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
- const fh = try self.addFileHandle(handle);
+ const diags = &self.base.comp.link_diags;
+ const first_eflags = &self.first_eflags;
+ const target = self.base.comp.root_mod.resolved_target.result;
+ const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
+ const default_sym_version = self.default_sym_version;
+ const file_handles = &self.file_handles;
+
+ const handle = obj.file;
+ const fh = try addFileHandle(gpa, file_handles, handle);
const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .object = .{
.path = .{
- .root_dir = path.root_dir,
- .sub_path = try gpa.dupe(u8, path.sub_path),
+ .root_dir = obj.path.root_dir,
+ .sub_path = try gpa.dupe(u8, obj.path.sub_path),
},
.file_handle = fh,
.index = index,
@@ -1406,39 +1146,51 @@ fn parseObject(self: *Elf, path: Path) ParseError!void {
try self.objects.append(gpa, index);
const object = self.file(index).?.object;
- try object.parse(self);
+ try object.parseCommon(gpa, diags, obj.path, handle, target, first_eflags);
+ if (!self.base.isStaticLib()) {
+ try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
+ }
}
-fn parseArchive(self: *Elf, path: Path, must_link: bool) ParseError!void {
+fn parseArchive(
+ gpa: Allocator,
+ diags: *Diags,
+ file_handles: *std.ArrayListUnmanaged(File.Handle),
+ files: *std.MultiArrayList(File.Entry),
+ first_eflags: *?elf.Word,
+ target: std.Target,
+ debug_fmt_strip: bool,
+ default_sym_version: elf.Versym,
+ objects: *std.ArrayListUnmanaged(File.Index),
+ obj: link.Input.Object,
+ is_static_lib: bool,
+) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = self.base.comp.gpa;
- const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
- const fh = try self.addFileHandle(handle);
-
- var archive: Archive = .{};
+ const fh = try addFileHandle(gpa, file_handles, obj.file);
+ var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh);
defer archive.deinit(gpa);
- try archive.parse(self, path, fh);
- const objects = try archive.objects.toOwnedSlice(gpa);
- defer gpa.free(objects);
+ const init_alive = if (is_static_lib) true else obj.must_link;
- for (objects) |extracted| {
- const index: File.Index = @intCast(try self.files.addOne(gpa));
- self.files.set(index, .{ .object = extracted });
- const object = &self.files.items(.data)[index].object;
+ for (archive.objects) |extracted| {
+ const index: File.Index = @intCast(try files.addOne(gpa));
+ files.set(index, .{ .object = extracted });
+ const object = &files.items(.data)[index].object;
object.index = index;
- object.alive = must_link;
- try object.parse(self);
- try self.objects.append(gpa, index);
+ object.alive = init_alive;
+ try object.parseCommon(gpa, diags, obj.path, obj.file, target, first_eflags);
+ if (!is_static_lib)
+ try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
+ try objects.append(gpa, index);
}
}
-fn parseSharedObject(
+fn parseDso(
gpa: Allocator,
diags: *Diags,
- lib: SystemLib,
+ dso: link.Input.Dso,
shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
files: *std.MultiArrayList(File.Entry),
target: std.Target,
@@ -1446,20 +1198,16 @@ fn parseSharedObject(
const tracy = trace(@src());
defer tracy.end();
- const handle = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{});
- defer handle.close();
+ const handle = dso.file;
const stat = Stat.fromFs(try handle.stat());
- var header = try SharedObject.parseHeader(gpa, diags, lib.path, handle, stat, target);
+ var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
defer header.deinit(gpa);
- const soname = header.soname() orelse lib.path.basename();
+ const soname = header.soname() orelse dso.path.basename();
const gop = try shared_objects.getOrPut(gpa, soname);
- if (gop.found_existing) {
- header.deinit(gpa);
- return;
- }
+ if (gop.found_existing) return;
errdefer _ = shared_objects.pop();
const index: File.Index = @intCast(try files.addOne(gpa));
@@ -1471,8 +1219,8 @@ fn parseSharedObject(
errdefer parsed.deinit(gpa);
const duped_path: Path = .{
- .root_dir = lib.path.root_dir,
- .sub_path = try gpa.dupe(u8, lib.path.sub_path),
+ .root_dir = dso.path.root_dir,
+ .sub_path = try gpa.dupe(u8, dso.path.sub_path),
};
errdefer gpa.free(duped_path.sub_path);
@@ -1481,8 +1229,8 @@ fn parseSharedObject(
.parsed = parsed,
.path = duped_path,
.index = index,
- .needed = lib.needed,
- .alive = lib.needed,
+ .needed = dso.needed,
+ .alive = dso.needed,
.aliases = null,
.symbols = .empty,
.symbols_extra = .empty,
@@ -1490,7 +1238,7 @@ fn parseSharedObject(
.output_symtab_ctx = .{},
},
});
- const so = fileLookup(files.*, index).?.shared_object;
+ const so = fileLookup(files.*, index, null).?.shared_object;
// TODO: save this work for later
const nsyms = parsed.symbols.len;
@@ -1511,148 +1259,6 @@ fn parseSharedObject(
}
}
-fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const comp = self.base.comp;
- const gpa = comp.gpa;
- const diags = &comp.link_diags;
-
- const in_file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{});
- defer in_file.close();
- const data = try in_file.readToEndAlloc(gpa, std.math.maxInt(u32));
- defer gpa.free(data);
-
- var script: LdScript = .{ .path = lib.path };
- defer script.deinit(gpa);
- try script.parse(data, self);
-
- var arena_allocator = std.heap.ArenaAllocator.init(gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- var test_path = std.ArrayList(u8).init(arena);
- var checked_paths = std.ArrayList([]const u8).init(arena);
-
- for (script.args.items) |script_arg| {
- checked_paths.clearRetainingCapacity();
-
- success: {
- if (mem.startsWith(u8, script_arg.path, "-l")) {
- const lib_name = script_arg.path["-l".len..];
-
- // TODO I think technically we should re-use the mechanism used by the frontend here.
- // Maybe we should hoist search-strategy all the way here?
- for (self.lib_dirs) |lib_dir| {
- if (!self.base.isStatic()) {
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .dynamic))
- break :success;
- }
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .static))
- break :success;
- }
- } else {
- var buffer: [fs.max_path_bytes]u8 = undefined;
- if (fs.realpath(script_arg.path, &buffer)) |path| {
- test_path.clearRetainingCapacity();
- try test_path.writer().writeAll(path);
- break :success;
- } else |_| {}
-
- try checked_paths.append(try arena.dupe(u8, script_arg.path));
- for (self.lib_dirs) |lib_dir| {
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, script_arg.path, null))
- break :success;
- }
- }
-
- diags.addMissingLibraryError(
- checked_paths.items,
- "missing library dependency: GNU ld script '{}' requires '{s}', but file not found",
- .{ @as(Path, lib.path), script_arg.path },
- );
- continue;
- }
-
- const full_path = Path.initCwd(test_path.items);
- parseInputReportingFailure(self, full_path, script_arg.needed, false);
- }
-}
-
-pub fn validateEFlags(self: *Elf, file_index: File.Index, e_flags: elf.Word) !void {
- if (self.first_eflags == null) {
- self.first_eflags = e_flags;
- return; // there isn't anything to conflict with yet
- }
- const self_eflags: *elf.Word = &self.first_eflags.?;
-
- switch (self.getTarget().cpu.arch) {
- .riscv64 => {
- if (e_flags != self_eflags.*) {
- const riscv_eflags: riscv.RiscvEflags = @bitCast(e_flags);
- const self_riscv_eflags: *riscv.RiscvEflags = @ptrCast(self_eflags);
-
- self_riscv_eflags.rvc = self_riscv_eflags.rvc or riscv_eflags.rvc;
- self_riscv_eflags.tso = self_riscv_eflags.tso or riscv_eflags.tso;
-
- var any_errors: bool = false;
- if (self_riscv_eflags.fabi != riscv_eflags.fabi) {
- any_errors = true;
- try self.addFileError(
- file_index,
- "cannot link object files with different float-point ABIs",
- .{},
- );
- }
- if (self_riscv_eflags.rve != riscv_eflags.rve) {
- any_errors = true;
- try self.addFileError(
- file_index,
- "cannot link object files with different RVEs",
- .{},
- );
- }
- if (any_errors) return error.LinkFailure;
- }
- },
- else => {},
- }
-}
-
-fn accessLibPath(
- self: *Elf,
- arena: Allocator,
- test_path: *std.ArrayList(u8),
- checked_paths: ?*std.ArrayList([]const u8),
- lib_dir_path: []const u8,
- lib_name: []const u8,
- link_mode: ?std.builtin.LinkMode,
-) !bool {
- const sep = fs.path.sep_str;
- const target = self.getTarget();
- test_path.clearRetainingCapacity();
- const prefix = if (link_mode != null) "lib" else "";
- const suffix = if (link_mode) |mode| switch (mode) {
- .static => target.staticLibSuffix(),
- .dynamic => target.dynamicLibSuffix(),
- } else "";
- try test_path.writer().print("{s}" ++ sep ++ "{s}{s}{s}", .{
- lib_dir_path,
- prefix,
- lib_name,
- suffix,
- });
- if (checked_paths) |cpaths| {
- try cpaths.append(try arena.dupe(u8, test_path.items));
- }
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
- error.FileNotFound => return false,
- else => |e| return e,
- };
- return true;
-}
-
/// When resolving symbols, we approach the problem similarly to `mold`.
/// 1. Resolve symbols across all objects (including those preemptively extracted from archives).
/// 2. Resolve symbols across all shared objects.
@@ -1842,7 +1448,7 @@ pub fn initOutputSection(self: *Elf, args: struct {
".dtors", ".gnu.warning",
};
inline for (name_prefixes) |prefix| {
- if (std.mem.eql(u8, args.name, prefix) or std.mem.startsWith(u8, args.name, prefix ++ ".")) {
+ if (mem.eql(u8, args.name, prefix) or mem.startsWith(u8, args.name, prefix ++ ".")) {
break :blk prefix;
}
}
@@ -1854,9 +1460,9 @@ pub fn initOutputSection(self: *Elf, args: struct {
switch (args.type) {
elf.SHT_NULL => unreachable,
elf.SHT_PROGBITS => {
- if (std.mem.eql(u8, args.name, ".init_array") or std.mem.startsWith(u8, args.name, ".init_array."))
+ if (mem.eql(u8, args.name, ".init_array") or mem.startsWith(u8, args.name, ".init_array."))
break :tt elf.SHT_INIT_ARRAY;
- if (std.mem.eql(u8, args.name, ".fini_array") or std.mem.startsWith(u8, args.name, ".fini_array."))
+ if (mem.eql(u8, args.name, ".fini_array") or mem.startsWith(u8, args.name, ".fini_array."))
break :tt elf.SHT_FINI_ARRAY;
break :tt args.type;
},
@@ -1951,11 +1557,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
try man.addOptionalFile(self.version_script);
man.hash.add(self.allow_undefined_version);
man.hash.addOptional(self.enable_new_dtags);
- for (comp.objects) |obj| {
- _ = try man.addFilePath(obj.path, null);
- man.hash.add(obj.must_link);
- man.hash.add(obj.loption);
- }
+ try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@@ -1973,7 +1575,6 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
man.hash.add(comp.link_eh_frame_hdr);
man.hash.add(self.emit_relocs);
man.hash.add(comp.config.rdynamic);
- man.hash.addListOfBytes(self.lib_dirs);
man.hash.addListOfBytes(self.rpath_table.keys());
if (output_mode == .Exe) {
man.hash.add(self.base.stack_size);
@@ -2003,7 +1604,6 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
}
man.hash.addOptionalBytes(self.soname);
man.hash.addOptional(comp.version);
- try link.hashAddSystemLibs(&man, comp.system_libs);
man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
man.hash.add(self.base.allow_shlib_undefined);
man.hash.add(self.bind_global_refs_locally);
@@ -2050,8 +1650,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
- if (comp.objects.len != 0)
- break :blk comp.objects[0].path;
+ if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
@@ -2267,11 +1866,6 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
try argv.appendSlice(&.{ "-wrap", symbol_name });
}
- for (self.lib_dirs) |lib_dir| {
- try argv.append("-L");
- try argv.append(lib_dir);
- }
-
if (comp.config.link_libc) {
if (comp.libc_installation) |libc_installation| {
try argv.append("-L");
@@ -2311,21 +1905,26 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
// Positional arguments to the linker such as object files.
var whole_archive = false;
- for (comp.objects) |obj| {
- if (obj.must_link and !whole_archive) {
- try argv.append("-whole-archive");
- whole_archive = true;
- } else if (!obj.must_link and whole_archive) {
- try argv.append("-no-whole-archive");
- whole_archive = false;
- }
- if (obj.loption) {
- assert(obj.path.sub_path[0] == ':');
- try argv.append("-l");
- }
- try argv.append(try obj.path.toString(arena));
- }
+ for (self.base.comp.link_inputs) |link_input| switch (link_input) {
+ .res => unreachable, // Windows-only
+ .dso => continue,
+ .object, .archive => |obj| {
+ if (obj.must_link and !whole_archive) {
+ try argv.append("-whole-archive");
+ whole_archive = true;
+ } else if (!obj.must_link and whole_archive) {
+ try argv.append("-no-whole-archive");
+ whole_archive = false;
+ }
+ try argv.append(try obj.path.toString(arena));
+ },
+ .dso_exact => |dso_exact| {
+ assert(dso_exact.name[0] == ':');
+ try argv.appendSlice(&.{ "-l", dso_exact.name });
+ },
+ };
+
if (whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
@@ -2361,35 +1960,35 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
// Shared libraries.
if (is_exe_or_dyn_lib) {
- const system_libs = comp.system_libs.keys();
- const system_libs_values = comp.system_libs.values();
-
// Worst-case, we need an --as-needed argument for every lib, as well
// as one before and one after.
- try argv.ensureUnusedCapacity(system_libs.len * 2 + 2);
- argv.appendAssumeCapacity("--as-needed");
+ try argv.append("--as-needed");
var as_needed = true;
- for (system_libs_values) |lib_info| {
- const lib_as_needed = !lib_info.needed;
- switch ((@as(u2, @intFromBool(lib_as_needed)) << 1) | @intFromBool(as_needed)) {
- 0b00, 0b11 => {},
- 0b01 => {
- argv.appendAssumeCapacity("--no-as-needed");
- as_needed = false;
- },
- 0b10 => {
- argv.appendAssumeCapacity("--as-needed");
- as_needed = true;
- },
- }
+ for (self.base.comp.link_inputs) |link_input| switch (link_input) {
+ .res => unreachable, // Windows-only
+ .object, .archive, .dso_exact => continue,
+ .dso => |dso| {
+ const lib_as_needed = !dso.needed;
+ switch ((@as(u2, @intFromBool(lib_as_needed)) << 1) | @intFromBool(as_needed)) {
+ 0b00, 0b11 => {},
+ 0b01 => {
+ argv.appendAssumeCapacity("--no-as-needed");
+ as_needed = false;
+ },
+ 0b10 => {
+ argv.appendAssumeCapacity("--as-needed");
+ as_needed = true;
+ },
+ }
- // By this time, we depend on these libs being dynamically linked
- // libraries and not static libraries (the check for that needs to be earlier),
- // but they could be full paths to .so files, in which case we
- // want to avoid prepending "-l".
- argv.appendAssumeCapacity(try lib_info.path.?.toString(arena));
- }
+ // By this time, we depend on these libs being dynamically linked
+ // libraries and not static libraries (the check for that needs to be earlier),
+ // but they could be full paths to .so files, in which case we
+ // want to avoid prepending "-l".
+ argv.appendAssumeCapacity(try dso.path.toString(arena));
+ },
+ };
if (!as_needed) {
argv.appendAssumeCapacity("--as-needed");
@@ -2421,7 +2020,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
}
- const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
+ const lib_path = try std.fmt.allocPrint(arena, "{}{c}lib{s}.so.{d}", .{
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
});
try argv.append(lib_path);
@@ -3469,8 +3068,14 @@ pub fn sortShdrs(
};
pub fn lessThan(ctx: Context, lhs: @This(), rhs: @This()) bool {
- return shdrRank(ctx.shdrs[lhs.shndx], ctx.shstrtab) <
- shdrRank(ctx.shdrs[rhs.shndx], ctx.shstrtab);
+ const lhs_rank = shdrRank(ctx.shdrs[lhs.shndx], ctx.shstrtab);
+ const rhs_rank = shdrRank(ctx.shdrs[rhs.shndx], ctx.shstrtab);
+ if (lhs_rank == rhs_rank) {
+ const lhs_name = shString(ctx.shstrtab, ctx.shdrs[lhs.shndx].sh_name);
+ const rhs_name = shString(ctx.shstrtab, ctx.shdrs[rhs.shndx].sh_name);
+ return std.mem.lessThan(u8, lhs_name, rhs_name);
+ }
+ return lhs_rank < rhs_rank;
}
};
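
The comparator change above breaks rank ties by section name so that switching to an unstable sort (next hunk) still yields a deterministic section order. A self-contained sketch of the same idea, using an illustrative Entry type rather than the real section header structures:

    const std = @import("std");

    // Illustrative stand-in for the sorted entries; only rank and name matter here.
    const Entry = struct { rank: u8, name: []const u8 };

    fn lessThan(_: void, lhs: Entry, rhs: Entry) bool {
        if (lhs.rank == rhs.rank) return std.mem.lessThan(u8, lhs.name, rhs.name);
        return lhs.rank < rhs.rank;
    }

    test "equal ranks fall back to name order" {
        var entries = [_]Entry{
            .{ .rank = 1, .name = ".text.b" },
            .{ .rank = 1, .name = ".text.a" },
        };
        std.mem.sortUnstable(Entry, &entries, {}, lessThan);
        try std.testing.expectEqualStrings(".text.a", entries[0].name);
    }
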
@@ -3486,7 +3091,7 @@ pub fn sortShdrs(
.shdrs = shdrs,
.shstrtab = shstrtab,
};
- mem.sort(Entry, entries, sort_context, Entry.lessThan);
+ mem.sortUnstable(Entry, entries, sort_context, Entry.lessThan);
const backlinks = try gpa.alloc(u32, entries.len);
defer gpa.free(backlinks);
@@ -3515,7 +3120,7 @@ pub fn sortShdrs(
for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
atom_list.output_section_index = backlinks[atom_list.output_section_index];
for (atom_list.atoms.keys()) |ref| {
- fileLookup(files, ref.file).?.atom(ref.index).?.output_section_index = atom_list.output_section_index;
+ fileLookup(files, ref.file, zig_object_ptr).?.atom(ref.index).?.output_section_index = atom_list.output_section_index;
}
if (shdr.sh_type == elf.SHT_RELA) {
// FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
@@ -4745,30 +4350,30 @@ pub fn thunk(self: *Elf, index: Thunk.Index) *Thunk {
}
pub fn file(self: *Elf, index: File.Index) ?File {
- return fileLookup(self.files, index);
+ return fileLookup(self.files, index, self.zig_object);
}
-fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index) ?File {
+fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_object: ?*ZigObject) ?File {
const tag = files.items(.tags)[index];
return switch (tag) {
.null => null,
.linker_defined => .{ .linker_defined = &files.items(.data)[index].linker_defined },
- .zig_object => .{ .zig_object = &files.items(.data)[index].zig_object },
+ .zig_object => .{ .zig_object = zig_object.? },
.object => .{ .object = &files.items(.data)[index].object },
.shared_object => .{ .shared_object = &files.items(.data)[index].shared_object },
};
}
-pub fn addFileHandle(self: *Elf, handle: fs.File) !File.HandleIndex {
- const gpa = self.base.comp.gpa;
- const index: File.HandleIndex = @intCast(self.file_handles.items.len);
- const fh = try self.file_handles.addOne(gpa);
- fh.* = handle;
- return index;
+pub fn addFileHandle(
+ gpa: Allocator,
+ file_handles: *std.ArrayListUnmanaged(File.Handle),
+ handle: fs.File,
+) Allocator.Error!File.HandleIndex {
+ try file_handles.append(gpa, handle);
+ return @intCast(file_handles.items.len - 1);
}
pub fn fileHandle(self: Elf, index: File.HandleIndex) File.Handle {
- assert(index < self.file_handles.items.len);
return self.file_handles.items[index];
}
@@ -4791,8 +4396,7 @@ pub fn getGlobalSymbol(self: *Elf, name: []const u8, lib_name: ?[]const u8) !u32
}
pub fn zigObjectPtr(self: *Elf) ?*ZigObject {
- const index = self.zig_object_index orelse return null;
- return self.file(index).?.zig_object;
+ return self.zig_object;
}
pub fn linkerDefinedPtr(self: *Elf) ?*LinkerDefined {
@@ -4870,7 +4474,7 @@ fn shString(
off: u32,
) [:0]const u8 {
const slice = shstrtab[off..];
- return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
+ return slice[0..mem.indexOfScalar(u8, slice, 0).? :0];
}
pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
@@ -5628,7 +5232,6 @@ const GnuHashSection = synthetic_sections.GnuHashSection;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const HashSection = synthetic_sections.HashSection;
-const LdScript = @import("Elf/LdScript.zig");
const LinkerDefined = @import("Elf/LinkerDefined.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
@@ -5644,4 +5247,3 @@ const Thunk = @import("Elf/Thunk.zig");
const Value = @import("../Value.zig");
const VerneedSection = synthetic_sections.VerneedSection;
const ZigObject = @import("Elf/ZigObject.zig");
-const riscv = @import("riscv.zig");
diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig
index 50fffb0c19..0d177bc21a 100644
--- a/src/link/Elf/Archive.zig
+++ b/src/link/Elf/Archive.zig
@@ -1,29 +1,46 @@
-objects: std.ArrayListUnmanaged(Object) = .empty,
-strtab: std.ArrayListUnmanaged(u8) = .empty,
-
-pub fn deinit(self: *Archive, allocator: Allocator) void {
- self.objects.deinit(allocator);
- self.strtab.deinit(allocator);
+objects: []const Object,
+/// '\n'-delimited
+strtab: []const u8,
+
+pub fn deinit(a: *Archive, gpa: Allocator) void {
+ gpa.free(a.objects);
+ gpa.free(a.strtab);
+ a.* = undefined;
}
-pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.HandleIndex) !void {
- const comp = elf_file.base.comp;
- const gpa = comp.gpa;
- const diags = &comp.link_diags;
- const handle = elf_file.fileHandle(handle_index);
+pub fn parse(
+ gpa: Allocator,
+ diags: *Diags,
+ file_handles: *const std.ArrayListUnmanaged(File.Handle),
+ path: Path,
+ handle_index: File.HandleIndex,
+) !Archive {
+ const handle = file_handles.items[handle_index];
+ var pos: usize = 0;
+ {
+ var magic_buffer: [elf.ARMAG.len]u8 = undefined;
+ const n = try handle.preadAll(&magic_buffer, pos);
+ if (n != magic_buffer.len) return error.BadMagic;
+ if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic;
+ pos += magic_buffer.len;
+ }
+
const size = (try handle.stat()).size;
- var pos: usize = elf.ARMAG.len;
- while (true) {
- if (pos >= size) break;
- if (!mem.isAligned(pos, 2)) pos += 1;
+ var objects: std.ArrayListUnmanaged(Object) = .empty;
+ defer objects.deinit(gpa);
- var hdr_buffer: [@sizeOf(elf.ar_hdr)]u8 = undefined;
+ var strtab: std.ArrayListUnmanaged(u8) = .empty;
+ defer strtab.deinit(gpa);
+
+ while (pos < size) {
+ pos = mem.alignForward(usize, pos, 2);
+
+ var hdr: elf.ar_hdr = undefined;
{
- const amt = try handle.preadAll(&hdr_buffer, pos);
- if (amt != @sizeOf(elf.ar_hdr)) return error.InputOutput;
+ const n = try handle.preadAll(mem.asBytes(&hdr), pos);
+ if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;
}
- const hdr = @as(*align(1) const elf.ar_hdr, @ptrCast(&hdr_buffer)).*;
pos += @sizeOf(elf.ar_hdr);
if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) {
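
The rewritten Archive.parse now checks the archive magic itself via preadAll before walking the member headers. A rough standalone sketch of that check, assuming a plain std.fs.File; hasArchiveMagic is a hypothetical helper, not a function in this diff:

    const std = @import("std");

    /// Hypothetical helper: verify the "!<arch>\n" magic at the start of a file.
    /// preadAll may return fewer bytes than requested at end of file, which is
    /// why the byte count is checked as well as the contents.
    fn hasArchiveMagic(file: std.fs.File) !bool {
        const armag = "!<arch>\n";
        var buf: [armag.len]u8 = undefined;
        const n = try file.preadAll(&buf, 0);
        return n == buf.len and std.mem.eql(u8, &buf, armag);
    }
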
@@ -37,8 +54,8 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
if (hdr.isSymtab() or hdr.isSymtab64()) continue;
if (hdr.isStrtab()) {
- try self.strtab.resize(gpa, obj_size);
- const amt = try handle.preadAll(self.strtab.items, pos);
+ try strtab.resize(gpa, obj_size);
+ const amt = try handle.preadAll(strtab.items, pos);
if (amt != obj_size) return error.InputOutput;
continue;
}
@@ -47,7 +64,7 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
const name = if (hdr.name()) |name|
name
else if (try hdr.nameOffset()) |off|
- self.getString(off)
+ stringTableLookup(strtab.items, off)
else
unreachable;
@@ -70,14 +87,18 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
@as(Path, object.path), @as(Path, path),
});
- try self.objects.append(gpa, object);
+ try objects.append(gpa, object);
}
+
+ return .{
+ .objects = try objects.toOwnedSlice(gpa),
+ .strtab = try strtab.toOwnedSlice(gpa),
+ };
}
-fn getString(self: Archive, off: u32) []const u8 {
- assert(off < self.strtab.items.len);
- const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
- return name[0 .. name.len - 1];
+pub fn stringTableLookup(strtab: []const u8, off: u32) [:'\n']const u8 {
+ const slice = strtab[off..];
+ return slice[0..mem.indexOfScalar(u8, slice, '\n').? :'\n'];
}
pub fn setArHdr(opts: struct {
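
stringTableLookup above resolves a GNU-style long-name offset inside the '\n'-delimited string table. A simplified sketch, returning a plain slice rather than the sentinel-terminated one used in the diff:

    const std = @import("std");
    const mem = std.mem;

    /// Simplified sketch: each name in the long-name table ends with '\n';
    /// the offset selects where an entry starts.
    fn lookupLongName(strtab: []const u8, off: u32) []const u8 {
        const slice = strtab[off..];
        return slice[0..mem.indexOfScalar(u8, slice, '\n').?];
    }

    test "long-name table lookup" {
        const strtab = "libfoo.a/\nlibbar.a/\n";
        try std.testing.expectEqualStrings("libbar.a/", lookupLongName(strtab, 10));
    }
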
@@ -290,8 +311,9 @@ const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
-const Allocator = mem.Allocator;
+const Diags = @import("../../link.zig").Diags;
const Archive = @This();
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 4d8c23d2ff..6775c8c7ff 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -102,9 +102,13 @@ pub fn relocsShndx(self: Atom) ?u32 {
return self.relocs_section_index;
}
-pub fn priority(self: Atom, elf_file: *Elf) u64 {
- const index = self.file(elf_file).?.index();
- return (@as(u64, @intCast(index)) << 32) | @as(u64, @intCast(self.input_section_index));
+pub fn priority(atom: Atom, elf_file: *Elf) u64 {
+ const index = atom.file(elf_file).?.index();
+ return priorityLookup(index, atom.input_section_index);
+}
+
+pub fn priorityLookup(file_index: File.Index, input_section_index: u32) u64 {
+ return (@as(u64, @intCast(file_index)) << 32) | @as(u64, @intCast(input_section_index));
}
/// Returns how much room there is to grow in virtual address space.
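
The new priorityLookup packs the owning file index into the high 32 bits and the input section index into the low 32 bits, so a single integer comparison orders atoms first by file and then by section within the file. A sketch with illustrative u32 parameter types:

    const std = @import("std");

    /// Sketch of the packed priority key; parameter types are illustrative.
    fn priorityLookup(file_index: u32, input_section_index: u32) u64 {
        return (@as(u64, file_index) << 32) | input_section_index;
    }

    test "atoms sort by file first, then by section" {
        try std.testing.expect(priorityLookup(2, 0) > priorityLookup(1, 0xffff_ffff));
        try std.testing.expect(priorityLookup(1, 3) > priorityLookup(1, 2));
    }
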
@@ -255,19 +259,13 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
}
}
-pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
- const extras = self.extra(elf_file);
- return switch (self.file(elf_file).?) {
- .shared_object => unreachable,
- .linker_defined, .zig_object => &[0]Fde{},
- .object => |x| x.fdes.items[extras.fde_start..][0..extras.fde_count],
- };
+pub fn fdes(atom: Atom, object: *Object) []Fde {
+ const extras = object.atomExtra(atom.extra_index);
+ return object.fdes.items[extras.fde_start..][0..extras.fde_count];
}
-pub fn markFdesDead(self: Atom, elf_file: *Elf) void {
- for (self.fdes(elf_file)) |*fde| {
- fde.alive = false;
- }
+pub fn markFdesDead(self: Atom, object: *Object) void {
+ for (self.fdes(object)) |*fde| fde.alive = false;
}
pub fn addReloc(self: Atom, alloc: Allocator, reloc: elf.Elf64_Rela, zo: *ZigObject) !void {
@@ -946,16 +944,21 @@ fn format2(
atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size,
atom.prev_atom_ref, atom.next_atom_ref,
});
- if (atom.fdes(elf_file).len > 0) {
- try writer.writeAll(" : fdes{ ");
- const extras = atom.extra(elf_file);
- for (atom.fdes(elf_file), extras.fde_start..) |fde, i| {
- try writer.print("{d}", .{i});
- if (!fde.alive) try writer.writeAll("([*])");
- if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
- }
- try writer.writeAll(" }");
- }
+ if (atom.file(elf_file)) |atom_file| switch (atom_file) {
+ .object => |object| {
+ if (atom.fdes(object).len > 0) {
+ try writer.writeAll(" : fdes{ ");
+ const extras = atom.extra(elf_file);
+ for (atom.fdes(object), extras.fde_start..) |fde, i| {
+ try writer.print("{d}", .{i});
+ if (!fde.alive) try writer.writeAll("([*])");
+ if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
+ }
+ try writer.writeAll(" }");
+ }
+ },
+ else => {},
+ };
if (!atom.alive) {
try writer.writeAll(" : [*]");
}
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index 5aca15a205..d1ff168341 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -37,72 +37,84 @@ num_dynrelocs: u32 = 0,
output_symtab_ctx: Elf.SymtabCtx = .{},
output_ar_state: Archive.ArState = .{},
-pub fn deinit(self: *Object, allocator: Allocator) void {
- if (self.archive) |*ar| allocator.free(ar.path.sub_path);
- allocator.free(self.path.sub_path);
- self.shdrs.deinit(allocator);
- self.symtab.deinit(allocator);
- self.strtab.deinit(allocator);
- self.symbols.deinit(allocator);
- self.symbols_extra.deinit(allocator);
- self.symbols_resolver.deinit(allocator);
- self.atoms.deinit(allocator);
- self.atoms_indexes.deinit(allocator);
- self.atoms_extra.deinit(allocator);
- self.comdat_groups.deinit(allocator);
- self.comdat_group_data.deinit(allocator);
- self.relocs.deinit(allocator);
- self.fdes.deinit(allocator);
- self.cies.deinit(allocator);
- self.eh_frame_data.deinit(allocator);
+pub fn deinit(self: *Object, gpa: Allocator) void {
+ if (self.archive) |*ar| gpa.free(ar.path.sub_path);
+ gpa.free(self.path.sub_path);
+ self.shdrs.deinit(gpa);
+ self.symtab.deinit(gpa);
+ self.strtab.deinit(gpa);
+ self.symbols.deinit(gpa);
+ self.symbols_extra.deinit(gpa);
+ self.symbols_resolver.deinit(gpa);
+ self.atoms.deinit(gpa);
+ self.atoms_indexes.deinit(gpa);
+ self.atoms_extra.deinit(gpa);
+ self.comdat_groups.deinit(gpa);
+ self.comdat_group_data.deinit(gpa);
+ self.relocs.deinit(gpa);
+ self.fdes.deinit(gpa);
+ self.cies.deinit(gpa);
+ self.eh_frame_data.deinit(gpa);
for (self.input_merge_sections.items) |*isec| {
- isec.deinit(allocator);
+ isec.deinit(gpa);
}
- self.input_merge_sections.deinit(allocator);
- self.input_merge_sections_indexes.deinit(allocator);
+ self.input_merge_sections.deinit(gpa);
+ self.input_merge_sections_indexes.deinit(gpa);
}
-pub fn parse(self: *Object, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
- const cpu_arch = elf_file.getTarget().cpu.arch;
- const handle = elf_file.fileHandle(self.file_handle);
-
- try self.parseCommon(gpa, handle, elf_file);
-
+pub fn parse(
+ self: *Object,
+ gpa: Allocator,
+ diags: *Diags,
+ /// For error reporting purposes only.
+ path: Path,
+ handle: fs.File,
+ target: std.Target,
+ debug_fmt_strip: bool,
+ default_sym_version: elf.Versym,
+) !void {
// Append null input merge section
try self.input_merge_sections.append(gpa, .{});
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
- try self.initAtoms(gpa, handle, elf_file);
- try self.initSymbols(gpa, elf_file);
+ try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target);
+ try self.initSymbols(gpa, default_sym_version);
for (self.shdrs.items, 0..) |shdr, i| {
const atom_ptr = self.atom(self.atoms_indexes.items[i]) orelse continue;
if (!atom_ptr.alive) continue;
- if ((cpu_arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
- mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame"))
+ if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
+ mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame"))
{
- try self.parseEhFrame(gpa, handle, @as(u32, @intCast(i)), elf_file);
+ try self.parseEhFrame(gpa, handle, @intCast(i), target);
}
}
}
-fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
+pub fn parseCommon(
+ self: *Object,
+ gpa: Allocator,
+ diags: *Diags,
+ path: Path,
+ handle: fs.File,
+ target: std.Target,
+ first_eflags: *?elf.Word,
+) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
const file_size = (try handle.stat()).size;
- const header_buffer = try Elf.preadAllAlloc(allocator, handle, offset, @sizeOf(elf.Elf64_Ehdr));
- defer allocator.free(header_buffer);
+ const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
+ defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
- const em = elf_file.base.comp.root_mod.resolved_target.result.toElfMachine();
+ const em = target.toElfMachine();
if (em != self.header.?.e_machine) {
- return elf_file.failFile(self.index, "invalid ELF machine type: {s}", .{
+ return diags.failParse(path, "invalid ELF machine type: {s}", .{
@tagName(self.header.?.e_machine),
});
}
- try elf_file.validateEFlags(self.index, self.header.?.e_flags);
+ try validateEFlags(diags, path, target, self.header.?.e_flags, first_eflags);
if (self.header.?.e_shnum == 0) return;
@@ -110,30 +122,30 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
const shnum = math.cast(usize, self.header.?.e_shnum) orelse return error.Overflow;
const shsize = shnum * @sizeOf(elf.Elf64_Shdr);
if (file_size < offset + shoff or file_size < offset + shoff + shsize) {
- return elf_file.failFile(self.index, "corrupt header: section header table extends past the end of file", .{});
+ return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{});
}
- const shdrs_buffer = try Elf.preadAllAlloc(allocator, handle, offset + shoff, shsize);
- defer allocator.free(shdrs_buffer);
+ const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize);
+ defer gpa.free(shdrs_buffer);
const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum];
- try self.shdrs.appendUnalignedSlice(allocator, shdrs);
+ try self.shdrs.appendUnalignedSlice(gpa, shdrs);
for (self.shdrs.items) |shdr| {
if (shdr.sh_type != elf.SHT_NOBITS) {
if (file_size < offset + shdr.sh_offset or file_size < offset + shdr.sh_offset + shdr.sh_size) {
- return elf_file.failFile(self.index, "corrupt section: extends past the end of file", .{});
+ return diags.failParse(path, "corrupt section: extends past the end of file", .{});
}
}
}
- const shstrtab = try self.preadShdrContentsAlloc(allocator, handle, self.header.?.e_shstrndx);
- defer allocator.free(shstrtab);
+ const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx);
+ defer gpa.free(shstrtab);
for (self.shdrs.items) |shdr| {
if (shdr.sh_name >= shstrtab.len) {
- return elf_file.failFile(self.index, "corrupt section name offset", .{});
+ return diags.failParse(path, "corrupt section name offset", .{});
}
}
- try self.strtab.appendSlice(allocator, shstrtab);
+ try self.strtab.appendSlice(gpa, shstrtab);
const symtab_index = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_SYMTAB => break @as(u32, @intCast(i)),
@@ -144,19 +156,19 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
const shdr = self.shdrs.items[index];
self.first_global = shdr.sh_info;
- const raw_symtab = try self.preadShdrContentsAlloc(allocator, handle, index);
- defer allocator.free(raw_symtab);
+ const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index);
+ defer gpa.free(raw_symtab);
const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch {
- return elf_file.failFile(self.index, "symbol table not evenly divisible", .{});
+ return diags.failParse(path, "symbol table not evenly divisible", .{});
};
const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms];
const strtab_bias = @as(u32, @intCast(self.strtab.items.len));
- const strtab = try self.preadShdrContentsAlloc(allocator, handle, shdr.sh_link);
- defer allocator.free(strtab);
- try self.strtab.appendSlice(allocator, strtab);
+ const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link);
+ defer gpa.free(strtab);
+ try self.strtab.appendSlice(gpa, strtab);
- try self.symtab.ensureUnusedCapacity(allocator, symtab.len);
+ try self.symtab.ensureUnusedCapacity(gpa, symtab.len);
for (symtab) |sym| {
const out_sym = self.symtab.addOneAssumeCapacity();
out_sym.* = sym;
@@ -168,15 +180,56 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
}
}
-fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
- const comp = elf_file.base.comp;
- const debug_fmt_strip = comp.config.debug_format == .strip;
- const target = comp.root_mod.resolved_target.result;
+fn validateEFlags(
+ diags: *Diags,
+ path: Path,
+ target: std.Target,
+ e_flags: elf.Word,
+ first_eflags: *?elf.Word,
+) error{LinkFailure}!void {
+ if (first_eflags.*) |*self_eflags| {
+ switch (target.cpu.arch) {
+ .riscv64 => {
+ if (e_flags != self_eflags.*) {
+ const riscv_eflags: riscv.RiscvEflags = @bitCast(e_flags);
+ const self_riscv_eflags: *riscv.RiscvEflags = @ptrCast(self_eflags);
+
+ self_riscv_eflags.rvc = self_riscv_eflags.rvc or riscv_eflags.rvc;
+ self_riscv_eflags.tso = self_riscv_eflags.tso or riscv_eflags.tso;
+
+ var any_errors: bool = false;
+ if (self_riscv_eflags.fabi != riscv_eflags.fabi) {
+ any_errors = true;
+ diags.addParseError(path, "cannot link object files with different float-point ABIs", .{});
+ }
+ if (self_riscv_eflags.rve != riscv_eflags.rve) {
+ any_errors = true;
+ diags.addParseError(path, "cannot link object files with different RVEs", .{});
+ }
+ if (any_errors) return error.LinkFailure;
+ }
+ },
+ else => {},
+ }
+ } else {
+ first_eflags.* = e_flags;
+ }
+}
+
+fn initAtoms(
+ self: *Object,
+ gpa: Allocator,
+ diags: *Diags,
+ path: Path,
+ handle: fs.File,
+ debug_fmt_strip: bool,
+ target: std.Target,
+) !void {
const shdrs = self.shdrs.items;
- try self.atoms.ensureTotalCapacityPrecise(allocator, shdrs.len);
- try self.atoms_extra.ensureTotalCapacityPrecise(allocator, shdrs.len * @sizeOf(Atom.Extra));
- try self.atoms_indexes.ensureTotalCapacityPrecise(allocator, shdrs.len);
- try self.atoms_indexes.resize(allocator, shdrs.len);
+ try self.atoms.ensureTotalCapacityPrecise(gpa, shdrs.len);
+ try self.atoms_extra.ensureTotalCapacityPrecise(gpa, shdrs.len * @sizeOf(Atom.Extra));
+ try self.atoms_indexes.ensureTotalCapacityPrecise(gpa, shdrs.len);
+ try self.atoms_indexes.resize(gpa, shdrs.len);
@memset(self.atoms_indexes.items, 0);
for (shdrs, 0..) |shdr, i| {
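
validateEFlags follows a "first object wins, later objects must agree" pattern on e_flags. A heavily simplified sketch of that pattern is below; the real code additionally merges the RISC-V rvc/tso bits and only rejects genuine ABI mismatches, whereas this hypothetical checkEFlags demands exact equality:

    const std = @import("std");

    /// Hypothetical, simplified version of the check: the first e_flags value
    /// seen becomes the reference, and later values must match it exactly.
    fn checkEFlags(first_eflags: *?u32, e_flags: u32) error{LinkFailure}!void {
        if (first_eflags.*) |seen| {
            if (seen != e_flags) return error.LinkFailure;
        } else {
            first_eflags.* = e_flags;
        }
    }

    test "first object's flags become the reference" {
        var first: ?u32 = null;
        try checkEFlags(&first, 5);
        try checkEFlags(&first, 5);
        try std.testing.expectError(error.LinkFailure, checkEFlags(&first, 7));
    }
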
@@ -201,24 +254,24 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
};
const shndx: u32 = @intCast(i);
- const group_raw_data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
- defer allocator.free(group_raw_data);
+ const group_raw_data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ defer gpa.free(group_raw_data);
const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
- return elf_file.failFile(self.index, "corrupt section group: not evenly divisible ", .{});
+ return diags.failParse(path, "corrupt section group: not evenly divisible ", .{});
};
if (group_nmembers == 0) {
- return elf_file.failFile(self.index, "corrupt section group: empty section", .{});
+ return diags.failParse(path, "corrupt section group: empty section", .{});
}
const group_members = @as([*]align(1) const u32, @ptrCast(group_raw_data.ptr))[0..group_nmembers];
if (group_members[0] != elf.GRP_COMDAT) {
- return elf_file.failFile(self.index, "corrupt section group: unknown SHT_GROUP format", .{});
+ return diags.failParse(path, "corrupt section group: unknown SHT_GROUP format", .{});
}
const group_start: u32 = @intCast(self.comdat_group_data.items.len);
- try self.comdat_group_data.appendUnalignedSlice(allocator, group_members[1..]);
+ try self.comdat_group_data.appendUnalignedSlice(gpa, group_members[1..]);
- const comdat_group_index = try self.addComdatGroup(allocator);
+ const comdat_group_index = try self.addComdatGroup(gpa);
const comdat_group = self.comdatGroup(comdat_group_index);
comdat_group.* = .{
.signature_off = group_signature,
@@ -242,8 +295,8 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
const shndx: u32 = @intCast(i);
if (self.skipShdr(shndx, debug_fmt_strip)) continue;
const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
- const data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
- defer allocator.free(data);
+ const data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ defer gpa.free(data);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
} else .{ shdr.sh_size, Alignment.fromNonzeroByteUnits(shdr.sh_addralign) };
@@ -263,13 +316,13 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
elf.SHT_REL, elf.SHT_RELA => {
const atom_index = self.atoms_indexes.items[shdr.sh_info];
if (self.atom(atom_index)) |atom_ptr| {
- const relocs = try self.preadRelocsAlloc(allocator, handle, @intCast(i));
- defer allocator.free(relocs);
+ const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i));
+ defer gpa.free(relocs);
atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
const rel_count: u32 = @intCast(relocs.len);
self.setAtomFields(atom_ptr, .{ .rel_index = rel_index, .rel_count = rel_count });
- try self.relocs.appendUnalignedSlice(allocator, relocs);
+ try self.relocs.appendUnalignedSlice(gpa, relocs);
if (target.cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
}
@@ -293,14 +346,18 @@ fn skipShdr(self: *Object, index: u32, debug_fmt_strip: bool) bool {
return ignore;
}
-fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
+fn initSymbols(
+ self: *Object,
+ gpa: Allocator,
+ default_sym_version: elf.Versym,
+) !void {
const first_global = self.first_global orelse self.symtab.items.len;
const nglobals = self.symtab.items.len - first_global;
- try self.symbols.ensureTotalCapacityPrecise(allocator, self.symtab.items.len);
- try self.symbols_extra.ensureTotalCapacityPrecise(allocator, self.symtab.items.len * @sizeOf(Symbol.Extra));
- try self.symbols_resolver.ensureTotalCapacityPrecise(allocator, nglobals);
- self.symbols_resolver.resize(allocator, nglobals) catch unreachable;
+ try self.symbols.ensureTotalCapacityPrecise(gpa, self.symtab.items.len);
+ try self.symbols_extra.ensureTotalCapacityPrecise(gpa, self.symtab.items.len * @sizeOf(Symbol.Extra));
+ try self.symbols_resolver.ensureTotalCapacityPrecise(gpa, nglobals);
+ self.symbols_resolver.resize(gpa, nglobals) catch unreachable;
@memset(self.symbols_resolver.items, 0);
for (self.symtab.items, 0..) |sym, i| {
@@ -310,7 +367,7 @@ fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
sym_ptr.name_offset = sym.st_name;
sym_ptr.esym_index = @intCast(i);
sym_ptr.extra_index = self.addSymbolExtraAssumeCapacity(.{});
- sym_ptr.version_index = if (i >= first_global) elf_file.default_sym_version else .LOCAL;
+ sym_ptr.version_index = if (i >= first_global) default_sym_version else .LOCAL;
sym_ptr.flags.weak = sym.st_bind() == elf.STB_WEAK;
if (sym.st_shndx != elf.SHN_ABS and sym.st_shndx != elf.SHN_COMMON) {
sym_ptr.ref = .{ .index = self.atoms_indexes.items[sym.st_shndx], .file = self.index };
@@ -318,24 +375,30 @@ fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
}
}
-fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx: u32, elf_file: *Elf) !void {
+fn parseEhFrame(
+ self: *Object,
+ gpa: Allocator,
+ handle: fs.File,
+ shndx: u32,
+ target: std.Target,
+) !void {
const relocs_shndx = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_RELA => if (shdr.sh_info == shndx) break @as(u32, @intCast(i)),
else => {},
} else null;
- const raw = try self.preadShdrContentsAlloc(allocator, handle, shndx);
- defer allocator.free(raw);
- const data_start = @as(u32, @intCast(self.eh_frame_data.items.len));
- try self.eh_frame_data.appendSlice(allocator, raw);
+ const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ defer gpa.free(raw);
+ const data_start: u32 = @intCast(self.eh_frame_data.items.len);
+ try self.eh_frame_data.appendSlice(gpa, raw);
const relocs = if (relocs_shndx) |index|
- try self.preadRelocsAlloc(allocator, handle, index)
+ try self.preadRelocsAlloc(gpa, handle, index)
else
&[0]elf.Elf64_Rela{};
- defer allocator.free(relocs);
- const rel_start = @as(u32, @intCast(self.relocs.items.len));
- try self.relocs.appendUnalignedSlice(allocator, relocs);
- if (elf_file.getTarget().cpu.arch == .riscv64) {
+ defer gpa.free(relocs);
+ const rel_start: u32 = @intCast(self.relocs.items.len);
+ try self.relocs.appendUnalignedSlice(gpa, relocs);
+ if (target.cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_start..][0..relocs.len]);
}
const fdes_start = self.fdes.items.len;
@@ -345,11 +408,11 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
while (try it.next()) |rec| {
const rel_range = filterRelocs(self.relocs.items[rel_start..][0..relocs.len], rec.offset, rec.size + 4);
switch (rec.tag) {
- .cie => try self.cies.append(allocator, .{
+ .cie => try self.cies.append(gpa, .{
.offset = data_start + rec.offset,
.size = rec.size,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
- .rel_num = @as(u32, @intCast(rel_range.len)),
+ .rel_num = @intCast(rel_range.len),
.input_section_index = shndx,
.file_index = self.index,
}),
@@ -361,12 +424,12 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// this can happen for object files built by the linker with the -r flag.
continue;
}
- try self.fdes.append(allocator, .{
+ try self.fdes.append(gpa, .{
.offset = data_start + rec.offset,
.size = rec.size,
.cie_index = undefined,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
- .rel_num = @as(u32, @intCast(rel_range.len)),
+ .rel_num = @intCast(rel_range.len),
.input_section_index = shndx,
.file_index = self.index,
});
@@ -376,7 +439,7 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// Tie each FDE to its CIE
for (self.fdes.items[fdes_start..]) |*fde| {
- const cie_ptr = fde.offset + 4 - fde.ciePointer(elf_file);
+ const cie_ptr = fde.offset + 4 - fde.ciePointer(self);
const cie_index = for (self.cies.items[cies_start..], cies_start..) |cie, cie_index| {
if (cie.offset == cie_ptr) break @as(u32, @intCast(cie_index));
} else {
@@ -392,26 +455,26 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// Tie each FDE record to its matching atom
const SortFdes = struct {
- pub fn lessThan(ctx: *Elf, lhs: Fde, rhs: Fde) bool {
+ pub fn lessThan(ctx: *Object, lhs: Fde, rhs: Fde) bool {
const lhs_atom = lhs.atom(ctx);
const rhs_atom = rhs.atom(ctx);
- return lhs_atom.priority(ctx) < rhs_atom.priority(ctx);
+ return Atom.priorityLookup(ctx.index, lhs_atom.input_section_index) < Atom.priorityLookup(ctx.index, rhs_atom.input_section_index);
}
};
- mem.sort(Fde, self.fdes.items[fdes_start..], elf_file, SortFdes.lessThan);
+ mem.sort(Fde, self.fdes.items[fdes_start..], self, SortFdes.lessThan);
// Create a back-link from atom to FDEs
- var i: u32 = @as(u32, @intCast(fdes_start));
+ var i: u32 = @intCast(fdes_start);
while (i < self.fdes.items.len) {
const fde = self.fdes.items[i];
- const atom_ptr = fde.atom(elf_file);
+ const atom_ptr = fde.atom(self);
const start = i;
i += 1;
while (i < self.fdes.items.len) : (i += 1) {
const next_fde = self.fdes.items[i];
- if (atom_ptr.atom_index != next_fde.atom(elf_file).atom_index) break;
+ if (atom_ptr.atom_index != next_fde.atom(self).atom_index) break;
}
- atom_ptr.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
+ self.setAtomFields(atom_ptr, .{ .fde_start = start, .fde_count = i - start });
}
}
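
Note on the hunk above: each FDE's owning CIE is resolved by pointer arithmetic on the raw .eh_frame bytes. The 32-bit field at offset 4 of an FDE record stores the backwards distance to its CIE, so the CIE's offset is fde.offset + 4 - ciePointer. The following is a minimal standalone sketch of that lookup; the Rec type is a simplified stand-in, not the patch's actual Cie/Fde records.

    const std = @import("std");

    /// Simplified stand-in for the patch's Cie/Fde records: only the fields the
    /// lookup needs.
    const Rec = struct { offset: u32, size: u32 };

    /// Bytes [4..8) of an FDE record hold the backwards distance to its CIE.
    fn ciePointer(eh_frame_data: []const u8, fde: Rec) u32 {
        return std.mem.readInt(u32, eh_frame_data[fde.offset + 4 ..][0..4], .little);
    }

    /// Returns the index of the CIE whose offset matches the FDE's back-pointer,
    /// or null when nothing matches (a dangling FDE record).
    fn findCie(eh_frame_data: []const u8, cies: []const Rec, fde: Rec) ?usize {
        const cie_offset = fde.offset + 4 - ciePointer(eh_frame_data, fde);
        for (cies, 0..) |cie, i| {
            if (cie.offset == cie_offset) return i;
        }
        return null;
    }
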
@@ -904,7 +967,7 @@ pub fn markComdatGroupsDead(self: *Object, elf_file: *Elf) void {
const atom_index = self.atoms_indexes.items[shndx];
if (self.atom(atom_index)) |atom_ptr| {
atom_ptr.alive = false;
- atom_ptr.markFdesDead(elf_file);
+ atom_ptr.markFdesDead(self);
}
}
}
@@ -970,12 +1033,6 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
}
}
-pub fn parseAr(self: *Object, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
- const handle = elf_file.fileHandle(self.file_handle);
- try self.parseCommon(gpa, handle, elf_file);
-}
-
pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
@@ -1000,7 +1057,7 @@ pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
- const name = std.fs.path.basename(self.path.sub_path);
+ const name = fs.path.basename(self.path.sub_path);
const hdr = Archive.setArHdr(.{
.name = if (name.len <= Archive.max_member_name_len)
.{ .name = name }
@@ -1136,8 +1193,8 @@ pub fn resolveSymbol(self: Object, index: Symbol.Index, elf_file: *Elf) Elf.Ref
return elf_file.resolver.get(resolv).?;
}
-fn addSymbol(self: *Object, allocator: Allocator) !Symbol.Index {
- try self.symbols.ensureUnusedCapacity(allocator, 1);
+fn addSymbol(self: *Object, gpa: Allocator) !Symbol.Index {
+ try self.symbols.ensureUnusedCapacity(gpa, 1);
return self.addSymbolAssumeCapacity();
}
@@ -1147,9 +1204,9 @@ fn addSymbolAssumeCapacity(self: *Object) Symbol.Index {
return index;
}
-pub fn addSymbolExtra(self: *Object, allocator: Allocator, extra: Symbol.Extra) !u32 {
+pub fn addSymbolExtra(self: *Object, gpa: Allocator, extra: Symbol.Extra) !u32 {
const fields = @typeInfo(Symbol.Extra).@"struct".fields;
- try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
+ try self.symbols_extra.ensureUnusedCapacity(gpa, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
@@ -1198,27 +1255,27 @@ pub fn getString(self: Object, off: u32) [:0]const u8 {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
}
-fn addString(self: *Object, allocator: Allocator, str: []const u8) !u32 {
+fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 {
const off: u32 = @intCast(self.strtab.items.len);
- try self.strtab.ensureUnusedCapacity(allocator, str.len + 1);
+ try self.strtab.ensureUnusedCapacity(gpa, str.len + 1);
self.strtab.appendSliceAssumeCapacity(str);
self.strtab.appendAssumeCapacity(0);
return off;
}
/// Caller owns the memory.
-fn preadShdrContentsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, index: u32) ![]u8 {
+fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
const offset = if (self.archive) |ar| ar.offset else 0;
const shdr = self.shdrs.items[index];
const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow;
const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow;
- return Elf.preadAllAlloc(allocator, handle, offset + sh_offset, sh_size);
+ return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size);
}
/// Caller owns the memory.
-fn preadRelocsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
- const raw = try self.preadShdrContentsAlloc(allocator, handle, shndx);
+fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
+ const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
@@ -1230,9 +1287,9 @@ const AddAtomArgs = struct {
alignment: Alignment,
};
-fn addAtom(self: *Object, allocator: Allocator, args: AddAtomArgs) !Atom.Index {
- try self.atoms.ensureUnusedCapacity(allocator, 1);
- try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
+fn addAtom(self: *Object, gpa: Allocator, args: AddAtomArgs) !Atom.Index {
+ try self.atoms.ensureUnusedCapacity(gpa, 1);
+ try self.atoms_extra.ensureUnusedCapacity(gpa, @sizeOf(Atom.Extra));
return self.addAtomAssumeCapacity(args);
}
@@ -1257,9 +1314,9 @@ pub fn atom(self: *Object, atom_index: Atom.Index) ?*Atom {
return &self.atoms.items[atom_index];
}
-pub fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32 {
+pub fn addAtomExtra(self: *Object, gpa: Allocator, extra: Atom.Extra) !u32 {
const fields = @typeInfo(Atom.Extra).@"struct".fields;
- try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
+ try self.atoms_extra.ensureUnusedCapacity(gpa, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
@@ -1308,9 +1365,9 @@ fn setAtomFields(o: *Object, atom_ptr: *Atom, opts: Atom.Extra.AsOptionals) void
o.setAtomExtra(atom_ptr.extra_index, extras);
}
-fn addInputMergeSection(self: *Object, allocator: Allocator) !Merge.InputSection.Index {
+fn addInputMergeSection(self: *Object, gpa: Allocator) !Merge.InputSection.Index {
const index: Merge.InputSection.Index = @intCast(self.input_merge_sections.items.len);
- const msec = try self.input_merge_sections.addOne(allocator);
+ const msec = try self.input_merge_sections.addOne(gpa);
msec.* = .{};
return index;
}
@@ -1320,9 +1377,9 @@ fn inputMergeSection(self: *Object, index: Merge.InputSection.Index) ?*Merge.Inp
return &self.input_merge_sections.items[index];
}
-fn addComdatGroup(self: *Object, allocator: Allocator) !Elf.ComdatGroup.Index {
+fn addComdatGroup(self: *Object, gpa: Allocator) !Elf.ComdatGroup.Index {
const index = @as(Elf.ComdatGroup.Index, @intCast(self.comdat_groups.items.len));
- _ = try self.comdat_groups.addOne(allocator);
+ _ = try self.comdat_groups.addOne(gpa);
return index;
}
@@ -1516,8 +1573,9 @@ const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const Path = std.Build.Cache.Path;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
+const Diags = @import("../../link.zig").Diags;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const AtomList = @import("AtomList.zig");
@@ -1528,3 +1586,4 @@ const File = @import("file.zig").File;
const Merge = @import("Merge.zig");
const Symbol = @import("Symbol.zig");
const Alignment = Atom.Alignment;
+const riscv = @import("../riscv.zig");
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 893d3ce336..114a862fb6 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -928,7 +928,7 @@ pub fn getNavVAddr(
nav.name.toSlice(ip),
@"extern".lib_name.toSlice(ip),
),
- else => try self.getOrCreateMetadataForNav(elf_file, nav_index),
+ else => try self.getOrCreateMetadataForNav(zcu, nav_index),
};
const this_sym = self.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
@@ -1102,21 +1102,15 @@ pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index
}
}
-pub fn getOrCreateMetadataForNav(
- self: *ZigObject,
- elf_file: *Elf,
- nav_index: InternPool.Nav.Index,
-) !Symbol.Index {
- const gpa = elf_file.base.comp.gpa;
+pub fn getOrCreateMetadataForNav(self: *ZigObject, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
+ const gpa = zcu.gpa;
const gop = try self.navs.getOrPut(gpa, nav_index);
if (!gop.found_existing) {
- const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const symbol_index = try self.newSymbolWithAtom(gpa, 0);
- const zcu = elf_file.base.comp.zcu.?;
const nav_val = Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.resolved.val);
const sym = self.symbol(symbol_index);
if (nav_val.getVariable(zcu)) |variable| {
- if (variable.is_threadlocal and any_non_single_threaded) {
+ if (variable.is_threadlocal and zcu.comp.config.any_non_single_threaded) {
sym.flags.is_tls = true;
}
}
@@ -1425,8 +1419,8 @@ pub fn updateFunc(
log.debug("updateFunc {}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav });
- const sym_index = try self.getOrCreateMetadataForNav(elf_file, func.owner_nav);
- self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
+ const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
+ self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
@@ -1460,12 +1454,12 @@ pub fn updateFunc(
ip.getNav(func.owner_nav).fqn.fmt(ip),
});
const old_rva, const old_alignment = blk: {
- const atom_ptr = self.symbol(sym_index).atom(elf_file).?;
+ const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
try self.updateNavCode(elf_file, pt, func.owner_nav, sym_index, shndx, code, elf.STT_FUNC);
const new_rva, const new_alignment = blk: {
- const atom_ptr = self.symbol(sym_index).atom(elf_file).?;
+ const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
@@ -1477,7 +1471,7 @@ pub fn updateFunc(
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
- .size = sym.atom(elf_file).?.size,
+ .size = self.atom(sym.ref.index).?.size,
},
wip_nav,
);
@@ -1500,7 +1494,7 @@ pub fn updateFunc(
});
defer gpa.free(name);
const osec = if (self.text_index) |sect_sym_index|
- self.symbol(sect_sym_index).atom(elf_file).?.output_section_index
+ self.atom(self.symbol(sect_sym_index).ref.index).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text"),
@@ -1565,7 +1559,7 @@ pub fn updateNav(
};
if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
- const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
+ const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
@@ -1789,7 +1783,7 @@ pub fn updateExports(
const gpa = elf_file.base.comp.gpa;
const metadata = switch (exported) {
.nav => |nav| blk: {
- _ = try self.getOrCreateMetadataForNav(elf_file, nav);
+ _ = try self.getOrCreateMetadataForNav(zcu, nav);
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {
diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig
index 81913cb33c..1af1236526 100644
--- a/src/link/Elf/eh_frame.zig
+++ b/src/link/Elf/eh_frame.zig
@@ -19,18 +19,16 @@ pub const Fde = struct {
return base + fde.out_offset;
}
- pub fn data(fde: Fde, elf_file: *Elf) []u8 {
- const object = elf_file.file(fde.file_index).?.object;
+ pub fn data(fde: Fde, object: *Object) []u8 {
return object.eh_frame_data.items[fde.offset..][0..fde.calcSize()];
}
- pub fn cie(fde: Fde, elf_file: *Elf) Cie {
- const object = elf_file.file(fde.file_index).?.object;
+ pub fn cie(fde: Fde, object: *Object) Cie {
return object.cies.items[fde.cie_index];
}
- pub fn ciePointer(fde: Fde, elf_file: *Elf) u32 {
- const fde_data = fde.data(elf_file);
+ pub fn ciePointer(fde: Fde, object: *Object) u32 {
+ const fde_data = fde.data(object);
return std.mem.readInt(u32, fde_data[4..8], .little);
}
@@ -38,16 +36,14 @@ pub const Fde = struct {
return fde.size + 4;
}
- pub fn atom(fde: Fde, elf_file: *Elf) *Atom {
- const object = elf_file.file(fde.file_index).?.object;
- const rel = fde.relocs(elf_file)[0];
+ pub fn atom(fde: Fde, object: *Object) *Atom {
+ const rel = fde.relocs(object)[0];
const sym = object.symtab.items[rel.r_sym()];
const atom_index = object.atoms_indexes.items[sym.st_shndx];
return object.atom(atom_index).?;
}
- pub fn relocs(fde: Fde, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
- const object = elf_file.file(fde.file_index).?.object;
+ pub fn relocs(fde: Fde, object: *Object) []const elf.Elf64_Rela {
return object.relocs.items[fde.rel_index..][0..fde.rel_num];
}
@@ -87,7 +83,8 @@ pub const Fde = struct {
const fde = ctx.fde;
const elf_file = ctx.elf_file;
const base_addr = fde.address(elf_file);
- const atom_name = fde.atom(elf_file).name(elf_file);
+ const object = elf_file.file(fde.file_index).?.object;
+ const atom_name = fde.atom(object).name(elf_file);
try writer.print("@{x} : size({x}) : cie({d}) : {s}", .{
base_addr + fde.out_offset,
fde.calcSize(),
@@ -306,7 +303,7 @@ pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
}
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
- count += fde.relocs(elf_file).len;
+ count += fde.relocs(object).len;
}
}
return count;
@@ -369,16 +366,16 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
- const contents = fde.data(elf_file);
+ const contents = fde.data(object);
std.mem.writeInt(
i32,
contents[4..8],
- @truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(elf_file).out_offset))),
+ @truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset))),
.little,
);
- for (fde.relocs(elf_file)) |rel| {
+ for (fde.relocs(object)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
resolveReloc(fde, sym, rel, elf_file, contents) catch |err| switch (err) {
@@ -412,12 +409,12 @@ pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
- const contents = fde.data(elf_file);
+ const contents = fde.data(object);
std.mem.writeInt(
i32,
contents[4..8],
- @truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(elf_file).out_offset))),
+ @truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset))),
.little,
);
@@ -490,7 +487,7 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
- for (fde.relocs(elf_file)) |rel| {
+ for (fde.relocs(object)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset;
@@ -548,7 +545,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
- const relocs = fde.relocs(elf_file);
+ const relocs = fde.relocs(object);
assert(relocs.len > 0); // Should this be an error? Things are completely broken anyhow if this trips...
const rel = relocs[0];
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
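
Both writeEhFrame and writeEhFrameRelocatable above patch the same relative CIE pointer into the output FDE: bytes 4..8 become the distance from the FDE's output position plus 4 back to its CIE's output position. A self-contained sketch of that arithmetic, with illustrative names rather than the patch's own:

    const std = @import("std");

    /// Writes the FDE's CIE back-pointer: the distance from (fde_out_offset + 4)
    /// back to the CIE's position in the output .eh_frame section.
    fn writeCieBackPointer(contents: []u8, fde_out_offset: u64, cie_out_offset: u64) void {
        std.mem.writeInt(
            i32,
            contents[4..8],
            @truncate(@as(i64, @intCast(fde_out_offset + 4)) - @as(i64, @intCast(cie_out_offset))),
            .little,
        );
    }

    test writeCieBackPointer {
        var fde_bytes = [_]u8{0} ** 16;
        writeCieBackPointer(&fde_bytes, 0x40, 0x10);
        try std.testing.expectEqual(@as(i32, 0x34), std.mem.readInt(i32, fde_bytes[4..8], .little));
    }
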
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index 6eb4c2201f..e560644329 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -279,8 +279,8 @@ pub const File = union(enum) {
pub const Index = u32;
pub const Entry = union(enum) {
- null: void,
- zig_object: ZigObject,
+ null,
+ zig_object,
linker_defined: LinkerDefined,
object: Object,
shared_object: SharedObject,
diff --git a/src/link/Elf/gc.zig b/src/link/Elf/gc.zig
index e0680d5db6..ff6c0bb7ce 100644
--- a/src/link/Elf/gc.zig
+++ b/src/link/Elf/gc.zig
@@ -103,15 +103,20 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
assert(atom.visited);
const file = atom.file(elf_file).?;
- for (atom.fdes(elf_file)) |fde| {
- for (fde.relocs(elf_file)[1..]) |rel| {
- const ref = file.resolveSymbol(rel.r_sym(), elf_file);
- const target_sym = elf_file.symbol(ref) orelse continue;
- const target_atom = target_sym.atom(elf_file) orelse continue;
- target_atom.alive = true;
- gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
- if (markAtom(target_atom)) markLive(target_atom, elf_file);
- }
+ switch (file) {
+ .object => |object| {
+ for (atom.fdes(object)) |fde| {
+ for (fde.relocs(object)[1..]) |rel| {
+ const ref = file.resolveSymbol(rel.r_sym(), elf_file);
+ const target_sym = elf_file.symbol(ref) orelse continue;
+ const target_atom = target_sym.atom(elf_file) orelse continue;
+ target_atom.alive = true;
+ gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
+ if (markAtom(target_atom)) markLive(target_atom, elf_file);
+ }
+ }
+ },
+ else => {},
}
for (atom.relocs(elf_file)) |rel| {
@@ -135,23 +140,25 @@ fn mark(roots: std.ArrayList(*Atom), elf_file: *Elf) void {
}
}
-fn prune(elf_file: *Elf) void {
- const pruneInFile = struct {
- fn pruneInFile(file: File, ef: *Elf) void {
- for (file.atoms()) |atom_index| {
- const atom = file.atom(atom_index) orelse continue;
- if (atom.alive and !atom.visited) {
- atom.alive = false;
- atom.markFdesDead(ef);
- }
+fn pruneInFile(file: File) void {
+ for (file.atoms()) |atom_index| {
+ const atom = file.atom(atom_index) orelse continue;
+ if (atom.alive and !atom.visited) {
+ atom.alive = false;
+ switch (file) {
+ .object => |object| atom.markFdesDead(object),
+ else => {},
}
}
- }.pruneInFile;
+ }
+}
+
+fn prune(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zo| {
- pruneInFile(zo.asFile(), elf_file);
+ pruneInFile(zo.asFile());
}
for (elf_file.objects.items) |index| {
- pruneInFile(elf_file.file(index).?, elf_file);
+ pruneInFile(elf_file.file(index).?);
}
}
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index c88e95fec0..b494360446 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -1,27 +1,7 @@
-pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
+pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
const gpa = comp.gpa;
const diags = &comp.link_diags;
- for (comp.objects) |obj| {
- switch (Compilation.classifyFileExt(obj.path.sub_path)) {
- .object => parseObjectStaticLibReportingFailure(elf_file, obj.path),
- .static_library => parseArchiveStaticLibReportingFailure(elf_file, obj.path),
- else => diags.addParseError(obj.path, "unrecognized file extension", .{}),
- }
- }
-
- for (comp.c_object_table.keys()) |key| {
- parseObjectStaticLibReportingFailure(elf_file, key.status.success.object_path);
- }
-
- if (module_obj_path) |path| {
- parseObjectStaticLibReportingFailure(elf_file, path);
- }
-
- if (comp.include_compiler_rt) {
- parseObjectStaticLibReportingFailure(elf_file, comp.compiler_rt_obj.?.full_object_path);
- }
-
if (diags.hasErrors()) return error.FlushFailure;
// First, we flush relocatable object file generated with our backends.
@@ -150,22 +130,9 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path
if (diags.hasErrors()) return error.FlushFailure;
}
-pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
+pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
const diags = &comp.link_diags;
- for (comp.objects) |obj| {
- elf_file.parseInputReportingFailure(obj.path, false, obj.must_link);
- }
-
- // This is a set of object files emitted by clang in a single `build-exe` invocation.
- // For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
- // in this set.
- for (comp.c_object_table.keys()) |key| {
- elf_file.parseObjectReportingFailure(key.status.success.object_path);
- }
-
- if (module_obj_path) |path| elf_file.parseObjectReportingFailure(path);
-
if (diags.hasErrors()) return error.FlushFailure;
// Now, we are ready to resolve the symbols across all input files.
@@ -215,64 +182,6 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) l
if (diags.hasErrors()) return error.FlushFailure;
}
-fn parseObjectStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
- const diags = &elf_file.base.comp.link_diags;
- parseObjectStaticLib(elf_file, path) catch |err| switch (err) {
- error.LinkFailure => return,
- else => |e| diags.addParseError(path, "parsing object failed: {s}", .{@errorName(e)}),
- };
-}
-
-fn parseArchiveStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
- const diags = &elf_file.base.comp.link_diags;
- parseArchiveStaticLib(elf_file, path) catch |err| switch (err) {
- error.LinkFailure => return,
- else => |e| diags.addParseError(path, "parsing static library failed: {s}", .{@errorName(e)}),
- };
-}
-
-fn parseObjectStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
- const gpa = elf_file.base.comp.gpa;
- const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
- const fh = try elf_file.addFileHandle(handle);
-
- const index: File.Index = @intCast(try elf_file.files.addOne(gpa));
- elf_file.files.set(index, .{ .object = .{
- .path = .{
- .root_dir = path.root_dir,
- .sub_path = try gpa.dupe(u8, path.sub_path),
- },
- .file_handle = fh,
- .index = index,
- } });
- try elf_file.objects.append(gpa, index);
-
- const object = elf_file.file(index).?.object;
- try object.parseAr(elf_file);
-}
-
-fn parseArchiveStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
- const gpa = elf_file.base.comp.gpa;
- const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
- const fh = try elf_file.addFileHandle(handle);
-
- var archive = Archive{};
- defer archive.deinit(gpa);
- try archive.parse(elf_file, path, fh);
-
- const objects = try archive.objects.toOwnedSlice(gpa);
- defer gpa.free(objects);
-
- for (objects) |extracted| {
- const index = @as(File.Index, @intCast(try elf_file.files.addOne(gpa)));
- elf_file.files.set(index, .{ .object = extracted });
- const object = &elf_file.files.items(.data)[index].object;
- object.index = index;
- try object.parseAr(elf_file);
- try elf_file.objects.append(gpa, index);
- }
-}
-
fn claimUnresolved(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zig_object| {
zig_object.claimUnresolvedRelocatable(elf_file);
@@ -473,11 +382,12 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const SortRelocs = struct {
pub fn lessThan(ctx: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
_ = ctx;
+ assert(lhs.r_offset != rhs.r_offset);
return lhs.r_offset < rhs.r_offset;
}
};
- mem.sort(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);
+ mem.sortUnstable(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);
log.debug("writing {s} from 0x{x} to 0x{x}", .{
elf_file.getShString(shdr.sh_name),
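
The switch from mem.sort to mem.sortUnstable above is safe because relocation entries within one output section are expected to have distinct r_offset values, which the new assert documents; with no equal keys, stability cannot be observed. A small standalone test of that comparator, not part of the patch:

    const std = @import("std");
    const elf = std.elf;

    test "relocations sort by strictly increasing r_offset" {
        var relocs = [_]elf.Elf64_Rela{
            .{ .r_offset = 0x20, .r_info = 0, .r_addend = 0 },
            .{ .r_offset = 0x10, .r_info = 0, .r_addend = 0 },
            .{ .r_offset = 0x30, .r_info = 0, .r_addend = 0 },
        };
        std.mem.sortUnstable(elf.Elf64_Rela, &relocs, {}, struct {
            fn lessThan(_: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
                // Mirrors the patch: offsets are assumed unique within a section.
                std.debug.assert(lhs.r_offset != rhs.r_offset);
                return lhs.r_offset < rhs.r_offset;
            }
        }.lessThan);
        try std.testing.expectEqual(@as(u64, 0x10), relocs[0].r_offset);
        try std.testing.expectEqual(@as(u64, 0x30), relocs[2].r_offset);
    }
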
diff --git a/src/link/Elf/LdScript.zig b/src/link/LdScript.zig
index 349011a20a..ed5dbc4681 100644
--- a/src/link/Elf/LdScript.zig
+++ b/src/link/LdScript.zig
@@ -1,45 +1,47 @@
path: Path,
-cpu_arch: ?std.Target.Cpu.Arch = null,
-args: std.ArrayListUnmanaged(Arg) = .empty,
+cpu_arch: ?std.Target.Cpu.Arch,
+args: []const Arg,
pub const Arg = struct {
needed: bool = false,
path: []const u8,
};
-pub fn deinit(scr: *LdScript, allocator: Allocator) void {
- scr.args.deinit(allocator);
+pub fn deinit(ls: *LdScript, gpa: Allocator) void {
+ gpa.free(ls.args);
+ ls.* = undefined;
}
pub const Error = error{
LinkFailure,
- UnexpectedToken,
UnknownCpuArch,
OutOfMemory,
};
-pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
- const comp = elf_file.base.comp;
- const gpa = comp.gpa;
- const diags = &comp.link_diags;
-
+pub fn parse(
+ gpa: Allocator,
+ diags: *Diags,
+ /// For error reporting.
+ path: Path,
+ data: []const u8,
+) Error!LdScript {
var tokenizer = Tokenizer{ .source = data };
- var tokens = std.ArrayList(Token).init(gpa);
- defer tokens.deinit();
- var line_col = std.ArrayList(LineColumn).init(gpa);
- defer line_col.deinit();
+ var tokens: std.ArrayListUnmanaged(Token) = .empty;
+ defer tokens.deinit(gpa);
+ var line_col: std.ArrayListUnmanaged(LineColumn) = .empty;
+ defer line_col.deinit(gpa);
var line: usize = 0;
var prev_line_last_col: usize = 0;
while (true) {
const tok = tokenizer.next();
- try tokens.append(tok);
+ try tokens.append(gpa, tok);
const column = tok.start - prev_line_last_col;
- try line_col.append(.{ .line = line, .column = column });
+ try line_col.append(gpa, .{ .line = line, .column = column });
switch (tok.id) {
.invalid => {
- return diags.failParse(scr.path, "invalid token in LD script: '{s}' ({d}:{d})", .{
+ return diags.failParse(path, "invalid token in LD script: '{s}' ({d}:{d})", .{
std.fmt.fmtSliceEscapeLower(tok.get(data)), line, column,
});
},
@@ -52,18 +54,22 @@ pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
}
}
- var it = TokenIterator{ .tokens = tokens.items };
- var parser = Parser{ .source = data, .it = &it };
- var args = std.ArrayList(Arg).init(gpa);
- scr.doParse(.{
- .parser = &parser,
- .args = &args,
- }) catch |err| switch (err) {
+ var it: TokenIterator = .{ .tokens = tokens.items };
+ var parser: Parser = .{
+ .gpa = gpa,
+ .source = data,
+ .it = &it,
+ .args = .empty,
+ .cpu_arch = null,
+ };
+ defer parser.args.deinit(gpa);
+
+ parser.start() catch |err| switch (err) {
error.UnexpectedToken => {
const last_token_id = parser.it.pos - 1;
const last_token = parser.it.get(last_token_id);
const lcol = line_col.items[last_token_id];
- return diags.failParse(scr.path, "unexpected token in LD script: {s}: '{s}' ({d}:{d})", .{
+ return diags.failParse(path, "unexpected token in LD script: {s}: '{s}' ({d}:{d})", .{
@tagName(last_token.id),
last_token.get(data),
lcol.line,
@@ -72,30 +78,10 @@ pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
},
else => |e| return e,
};
- scr.args = args.moveToUnmanaged();
-}
-
-fn doParse(scr: *LdScript, ctx: struct {
- parser: *Parser,
- args: *std.ArrayList(Arg),
-}) !void {
- while (true) {
- ctx.parser.skipAny(&.{ .comment, .new_line });
-
- if (ctx.parser.maybe(.command)) |cmd_id| {
- const cmd = ctx.parser.getCommand(cmd_id);
- switch (cmd) {
- .output_format => scr.cpu_arch = try ctx.parser.outputFormat(),
- // TODO we should verify that group only contains libraries
- .input, .group => try ctx.parser.group(ctx.args),
- else => return error.UnexpectedToken,
- }
- } else break;
- }
-
- if (ctx.parser.it.next()) |tok| switch (tok.id) {
- .eof => {},
- else => return error.UnexpectedToken,
+ return .{
+ .path = path,
+ .cpu_arch = parser.cpu_arch,
+ .args = try parser.args.toOwnedSlice(gpa),
};
}
@@ -126,9 +112,34 @@ const Command = enum {
};
const Parser = struct {
+ gpa: Allocator,
source: []const u8,
it: *TokenIterator,
+ cpu_arch: ?std.Target.Cpu.Arch,
+ args: std.ArrayListUnmanaged(Arg),
+
+ fn start(parser: *Parser) !void {
+ while (true) {
+ parser.skipAny(&.{ .comment, .new_line });
+
+ if (parser.maybe(.command)) |cmd_id| {
+ const cmd = parser.getCommand(cmd_id);
+ switch (cmd) {
+ .output_format => parser.cpu_arch = try parser.outputFormat(),
+ // TODO we should verify that group only contains libraries
+ .input, .group => try parser.group(),
+ else => return error.UnexpectedToken,
+ }
+ } else break;
+ }
+
+ if (parser.it.next()) |tok| switch (tok.id) {
+ .eof => {},
+ else => return error.UnexpectedToken,
+ };
+ }
+
fn outputFormat(p: *Parser) !std.Target.Cpu.Arch {
const value = value: {
if (p.skip(&.{.lparen})) {
@@ -149,18 +160,19 @@ const Parser = struct {
return error.UnknownCpuArch;
}
- fn group(p: *Parser, args: *std.ArrayList(Arg)) !void {
+ fn group(p: *Parser) !void {
+ const gpa = p.gpa;
if (!p.skip(&.{.lparen})) return error.UnexpectedToken;
while (true) {
if (p.maybe(.literal)) |tok_id| {
const tok = p.it.get(tok_id);
const path = tok.get(p.source);
- try args.append(.{ .path = path, .needed = true });
+ try p.args.append(gpa, .{ .path = path, .needed = true });
} else if (p.maybe(.command)) |cmd_id| {
const cmd = p.getCommand(cmd_id);
switch (cmd) {
- .as_needed => try p.asNeeded(args),
+ .as_needed => try p.asNeeded(),
else => return error.UnexpectedToken,
}
} else break;
@@ -169,13 +181,14 @@ const Parser = struct {
_ = try p.require(.rparen);
}
- fn asNeeded(p: *Parser, args: *std.ArrayList(Arg)) !void {
+ fn asNeeded(p: *Parser) !void {
+ const gpa = p.gpa;
if (!p.skip(&.{.lparen})) return error.UnexpectedToken;
while (p.maybe(.literal)) |tok_id| {
const tok = p.it.get(tok_id);
const path = tok.get(p.source);
- try args.append(.{ .path = path, .needed = false });
+ try p.args.append(gpa, .{ .path = path, .needed = false });
}
_ = try p.require(.rparen);
@@ -227,21 +240,19 @@ const Token = struct {
end: usize,
const Id = enum {
- // zig fmt: off
eof,
invalid,
new_line,
- lparen, // (
- rparen, // )
- lbrace, // {
- rbrace, // }
+ lparen, // (
+ rparen, // )
+ lbrace, // {
+ rbrace, // }
- comment, // /* */
+ comment, // /* */
- command, // literal with special meaning, see Command
+ command, // literal with special meaning, see Command
literal,
- // zig fmt: on
};
const Index = usize;
@@ -430,10 +441,9 @@ const TokenIterator = struct {
};
const LdScript = @This();
+const Diags = @import("../link.zig").Diags;
const std = @import("std");
const assert = std.debug.assert;
const Path = std.Build.Cache.Path;
-
const Allocator = std.mem.Allocator;
-const Elf = @import("../Elf.zig");
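
With the rewrite above, LdScript no longer touches an *Elf at all: callers hand parse an allocator, a Diags for error reporting, the script's path, and its raw bytes, and get back a value that owns its args slice. A rough usage sketch under those assumptions follows; the helper, its log calls, and the import paths are illustrative only.

    const std = @import("std");
    const Path = std.Build.Cache.Path;
    // Assumed import paths; they depend on where such a caller would live.
    const Diags = @import("link.zig").Diags;
    const LdScript = @import("link/LdScript.zig");

    /// Hypothetical caller, not in the patch: parse a GNU ld script such as
    ///   GROUP ( libgcc_s.so.1 AS_NEEDED ( libgcc.a ) )
    /// and walk its members.
    fn loadScript(gpa: std.mem.Allocator, diags: *Diags, path: Path, bytes: []const u8) !void {
        var script = try LdScript.parse(gpa, diags, path, bytes);
        defer script.deinit(gpa);
        if (script.cpu_arch) |arch| {
            // Present only when the script has an OUTPUT_FORMAT(...) command.
            std.log.debug("ld script targets {s}", .{@tagName(arch)});
        }
        for (script.args) |arg| {
            // Members of INPUT(...)/GROUP(...); entries inside AS_NEEDED(...)
            // have needed == false.
            std.log.debug("ld script member: {s} (needed={})", .{ arg.path, arg.needed });
        }
    }
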
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 0563f0fb5e..1ffcabb1f5 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1,3 +1,7 @@
+pub const Atom = @import("MachO/Atom.zig");
+pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
+pub const Relocation = @import("MachO/Relocation.zig");
+
base: link.File,
rpath_list: []const []const u8,
@@ -114,8 +118,8 @@ headerpad_max_install_names: bool,
dead_strip_dylibs: bool,
/// Treatment of undefined symbols
undefined_treatment: UndefinedTreatment,
-/// Resolved list of library search directories
-lib_dirs: []const []const u8,
+/// TODO: delete this, libraries need to be resolved by the frontend instead
+lib_directories: []const Directory,
/// Resolved list of framework search directories
framework_dirs: []const []const u8,
/// List of input frameworks
@@ -213,7 +217,8 @@ pub fn createEmpty(
.platform = Platform.fromTarget(target),
.sdk_version = if (options.darwin_sdk_layout) |layout| inferSdkVersion(comp, layout) else null,
.undefined_treatment = if (allow_shlib_undefined) .dynamic_lookup else .@"error",
- .lib_dirs = options.lib_dirs,
+ // TODO delete this, directories must instead be resolved by the frontend
+ .lib_directories = options.lib_directories,
.framework_dirs = options.framework_dirs,
.force_load_objc = options.force_load_objc,
};
@@ -371,48 +376,44 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
- var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
+ var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
- try positionals.ensureUnusedCapacity(comp.objects.len);
- positionals.appendSliceAssumeCapacity(comp.objects);
+ try positionals.ensureUnusedCapacity(comp.link_inputs.len);
+
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .dso => continue, // handled below
+ .object, .archive => positionals.appendAssumeCapacity(link_input),
+ .dso_exact => @panic("TODO"),
+ .res => unreachable,
+ };
// This is a set of object files emitted by clang in a single `build-exe` invocation.
// For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
// in this set.
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
for (comp.c_object_table.keys()) |key| {
- positionals.appendAssumeCapacity(.{ .path = key.status.success.object_path });
+ positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(.{ .path = path });
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.config.any_sanitize_thread) {
- try positionals.append(.{ .path = comp.tsan_lib.?.full_object_path });
+ try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
}
if (comp.config.any_fuzz) {
- try positionals.append(.{ .path = comp.fuzzer_lib.?.full_object_path });
+ try positionals.append(try link.openObjectInput(diags, comp.fuzzer_lib.?.full_object_path));
}
- for (positionals.items) |obj| {
- self.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
- diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
+ for (positionals.items) |link_input| {
+ self.classifyInputFile(link_input) catch |err|
+ diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
var system_libs = std.ArrayList(SystemLib).init(gpa);
defer system_libs.deinit();
- // libs
- try system_libs.ensureUnusedCapacity(comp.system_libs.values().len);
- for (comp.system_libs.values()) |info| {
- system_libs.appendAssumeCapacity(.{
- .needed = info.needed,
- .weak = info.weak,
- .path = info.path.?,
- });
- }
-
// frameworks
try system_libs.ensureUnusedCapacity(self.frameworks.len);
for (self.frameworks) |info| {
@@ -436,20 +437,40 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
else => |e| return e, // TODO: convert into an error
};
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .object, .archive, .dso_exact => continue,
+ .res => unreachable,
+ .dso => {
+ self.classifyInputFile(link_input) catch |err|
+ diags.addParseError(link_input.path().?, "failed to parse input file: {s}", .{@errorName(err)});
+ },
+ };
+
for (system_libs.items) |lib| {
- self.classifyInputFile(lib.path, lib, false) catch |err|
- diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
+ switch (Compilation.classifyFileExt(lib.path.sub_path)) {
+ .shared_library => {
+ const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
+ self.classifyInputFile(dso_input) catch |err|
+ diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
+ },
+ .static_library => {
+ const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
+ self.classifyInputFile(archive_input) catch |err|
+ diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
+ },
+ else => unreachable,
+ }
}
// Finally, link against compiler_rt.
- const compiler_rt_path: ?Path = blk: {
- if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
- if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
- break :blk null;
- };
- if (compiler_rt_path) |path| {
- self.classifyInputFile(path, .{ .path = path }, false) catch |err|
- diags.addParseError(path, "failed to parse input file: {s}", .{@errorName(err)});
+ if (comp.compiler_rt_lib) |crt_file| {
+ const path = crt_file.full_object_path;
+ self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
+ } else if (comp.compiler_rt_obj) |crt_file| {
+ const path = crt_file.full_object_path;
+ self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
+            diags.addParseError(path, "failed to parse object: {s}", .{@errorName(err)});
}
try self.parseInputFiles();
@@ -596,9 +617,12 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
}
if (self.base.isRelocatable()) {
- for (comp.objects) |obj| {
- try argv.append(try obj.path.toString(arena));
- }
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .object, .archive => |obj| try argv.append(try obj.path.toString(arena)),
+ .res => |res| try argv.append(try res.path.toString(arena)),
+ .dso => |dso| try argv.append(try dso.path.toString(arena)),
+ .dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
+ };
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@@ -678,13 +702,15 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append("dynamic_lookup");
}
- for (comp.objects) |obj| {
- // TODO: verify this
- if (obj.must_link) {
- try argv.append("-force_load");
- }
- try argv.append(try obj.path.toString(arena));
- }
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .dso => continue, // handled below
+ .res => unreachable, // windows only
+ .object, .archive => |obj| {
+ if (obj.must_link) try argv.append("-force_load"); // TODO: verify this
+ try argv.append(try obj.path.toString(arena));
+ },
+ .dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
+ };
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@@ -703,21 +729,25 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
}
- for (self.lib_dirs) |lib_dir| {
- const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir});
+ for (self.lib_directories) |lib_directory| {
+ // TODO delete this, directories must instead be resolved by the frontend
+ const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_directory.path orelse "."});
try argv.append(arg);
}
- for (comp.system_libs.keys()) |l_name| {
- const info = comp.system_libs.get(l_name).?;
- const arg = if (info.needed)
- try std.fmt.allocPrint(arena, "-needed-l{s}", .{l_name})
- else if (info.weak)
- try std.fmt.allocPrint(arena, "-weak-l{s}", .{l_name})
- else
- try std.fmt.allocPrint(arena, "-l{s}", .{l_name});
- try argv.append(arg);
- }
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .object, .archive, .dso_exact => continue, // handled above
+ .res => unreachable, // windows only
+ .dso => |dso| {
+ if (dso.needed) {
+ try argv.appendSlice(&.{ "-needed-l", try dso.path.toString(arena) });
+ } else if (dso.weak) {
+ try argv.appendSlice(&.{ "-weak-l", try dso.path.toString(arena) });
+ } else {
+ try argv.appendSlice(&.{ "-l", try dso.path.toString(arena) });
+ }
+ },
+ };
for (self.framework_dirs) |f_dir| {
try argv.append("-F");
@@ -751,6 +781,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
Compilation.dump_argv(argv.items);
}
+/// TODO delete this, libsystem must be resolved when setting up the compilation pipeline
pub fn resolveLibSystem(
self: *MachO,
arena: Allocator,
@@ -774,8 +805,8 @@ pub fn resolveLibSystem(
},
};
- for (self.lib_dirs) |dir| {
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
+ for (self.lib_directories) |directory| {
+ if (try accessLibPath(arena, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
}
diags.addMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{});
@@ -789,13 +820,14 @@ pub fn resolveLibSystem(
});
}
-pub fn classifyInputFile(self: *MachO, path: Path, lib: SystemLib, must_link: bool) !void {
+pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const tracy = trace(@src());
defer tracy.end();
+ const path, const file = input.pathAndFile().?;
+ // TODO don't classify now, it's too late. The input file has already been classified
log.debug("classifying input file {}", .{path});
- const file = try path.root_dir.handle.openFile(path.sub_path, .{});
const fh = try self.addFileHandle(file);
var buffer: [Archive.SARMAG]u8 = undefined;
@@ -806,17 +838,17 @@ pub fn classifyInputFile(self: *MachO, path: Path, lib: SystemLib, must_link: bo
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_OBJECT => try self.addObject(path, fh, offset),
- macho.MH_DYLIB => _ = try self.addDylib(lib, true, fh, offset),
+ macho.MH_DYLIB => _ = try self.addDylib(.fromLinkInput(input), true, fh, offset),
else => return error.UnknownFileType,
}
return;
}
if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
- try self.addArchive(lib, must_link, fh, fat_arch);
+ try self.addArchive(input.archive, fh, fat_arch);
return;
}
- _ = try self.addTbd(lib, true, fh);
+ _ = try self.addTbd(.fromLinkInput(input), true, fh);
}
fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
@@ -903,7 +935,7 @@ fn parseInputFileWorker(self: *MachO, file: File) void {
};
}
-fn addArchive(self: *MachO, lib: SystemLib, must_link: bool, handle: File.HandleIndex, fat_arch: ?fat.Arch) !void {
+fn addArchive(self: *MachO, lib: link.Input.Object, handle: File.HandleIndex, fat_arch: ?fat.Arch) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -918,7 +950,7 @@ fn addArchive(self: *MachO, lib: SystemLib, must_link: bool, handle: File.Handle
self.files.set(index, .{ .object = unpacked });
const object = &self.files.items(.data)[index].object;
object.index = index;
- object.alive = must_link or lib.needed; // TODO: or self.options.all_load;
+ object.alive = lib.must_link; // TODO: or self.options.all_load;
object.hidden = lib.hidden;
try self.objects.append(gpa, index);
}
@@ -993,6 +1025,7 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool {
return false;
}
+/// TODO delete this, libraries must instead be resolved when instantiating the compilation pipeline
fn accessLibPath(
arena: Allocator,
test_path: *std.ArrayList(u8),
@@ -1051,9 +1084,11 @@ fn parseDependentDylibs(self: *MachO) !void {
if (self.dylibs.items.len == 0) return;
const gpa = self.base.comp.gpa;
- const lib_dirs = self.lib_dirs;
const framework_dirs = self.framework_dirs;
+ // TODO delete this, directories must instead be resolved by the frontend
+ const lib_directories = self.lib_directories;
+
var arena_alloc = std.heap.ArenaAllocator.init(gpa);
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
@@ -1094,9 +1129,9 @@ fn parseDependentDylibs(self: *MachO) !void {
// Library
const lib_name = eatPrefix(stem, "lib") orelse stem;
- for (lib_dirs) |dir| {
+ for (lib_directories) |lib_directory| {
test_path.clearRetainingCapacity();
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, lib_name)) break :full_path test_path.items;
+ if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
}
}
@@ -4366,6 +4401,24 @@ const SystemLib = struct {
hidden: bool = false,
reexport: bool = false,
must_link: bool = false,
+
+ fn fromLinkInput(link_input: link.Input) SystemLib {
+ return switch (link_input) {
+ .dso_exact => unreachable,
+ .res => unreachable,
+ .object, .archive => |obj| .{
+ .path = obj.path,
+ .must_link = obj.must_link,
+ .hidden = obj.hidden,
+ },
+ .dso => |dso| .{
+ .path = dso.path,
+ .needed = dso.needed,
+ .weak = dso.weak,
+ .reexport = dso.reexport,
+ },
+ };
+ }
};
pub const SdkLayout = std.zig.LibCDirs.DarwinSdkLayout;
@@ -5303,17 +5356,16 @@ const Air = @import("../Air.zig");
const Alignment = Atom.Alignment;
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
-pub const Atom = @import("MachO/Atom.zig");
const AtomicBool = std.atomic.Value(bool);
const Bind = bind.Bind;
const Cache = std.Build.Cache;
-const Path = Cache.Path;
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
const DataInCode = synthetic.DataInCode;
-pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
+const Directory = Cache.Directory;
const Dylib = @import("MachO/Dylib.zig");
const ExportTrie = @import("MachO/dyld_info/Trie.zig");
+const Path = Cache.Path;
const File = @import("MachO/file.zig").File;
const GotSection = synthetic.GotSection;
const Hash = std.hash.Wyhash;
@@ -5329,7 +5381,6 @@ const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Rebase = @import("MachO/dyld_info/Rebase.zig");
-pub const Relocation = @import("MachO/Relocation.zig");
const StringTable = @import("StringTable.zig");
const StubsSection = synthetic.StubsSection;
const StubsHelperSection = synthetic.StubsHelperSection;
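
As the TODO above notes, classifyInputFile still sniffs the opened file a second time: a Mach-O header magic selects object vs. dylib handling, the ar magic selects archive handling, and everything else falls through to the text-based .tbd path. The snippet below is a standalone illustration of that dispatch on a header buffer, simplified to omit fat-archive and endianness handling; it is not the patch's code.

    const std = @import("std");
    const macho = std.macho;

    const Kind = enum { object, dylib, archive, tbd };

    fn classify(header_bytes: []const u8) !Kind {
        if (header_bytes.len >= @sizeOf(macho.mach_header_64)) {
            const hdr = std.mem.bytesToValue(
                macho.mach_header_64,
                header_bytes[0..@sizeOf(macho.mach_header_64)],
            );
            if (hdr.magic == macho.MH_MAGIC_64) return switch (hdr.filetype) {
                macho.MH_OBJECT => .object,
                macho.MH_DYLIB => .dylib,
                else => error.UnknownFileType,
            };
        }
        if (header_bytes.len >= 8 and std.mem.eql(u8, header_bytes[0..8], "!<arch>\n"))
            return .archive; // static archive (ar) magic
        return .tbd; // otherwise treated as a text-based dylib stub
    }
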
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index b27e82c793..497969ab90 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -3,16 +3,16 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
const diags = &macho_file.base.comp.link_diags;
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
- var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
+ var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
- try positionals.ensureUnusedCapacity(comp.objects.len);
- positionals.appendSliceAssumeCapacity(comp.objects);
+ try positionals.ensureUnusedCapacity(comp.link_inputs.len);
+ positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(.{ .path = key.status.success.object_path });
+ try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(.{ .path = path });
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
@@ -20,7 +20,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
// the *only* input file over.
// TODO: in the future, when we implement `dsymutil` alternative directly in the Zig
// compiler, investigate if we can get rid of this `if` prong here.
- const path = positionals.items[0].path;
+ const path = positionals.items[0].path().?;
const in_file = try path.root_dir.handle.openFile(path.sub_path, .{});
const stat = try in_file.stat();
const amt = try in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size);
@@ -28,9 +28,9 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
return;
}
- for (positionals.items) |obj| {
- macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
- diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
+ for (positionals.items) |link_input| {
+ macho_file.classifyInputFile(link_input) catch |err|
+ diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
@@ -72,25 +72,25 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
const gpa = comp.gpa;
const diags = &macho_file.base.comp.link_diags;
- var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
+ var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
- try positionals.ensureUnusedCapacity(comp.objects.len);
- positionals.appendSliceAssumeCapacity(comp.objects);
+ try positionals.ensureUnusedCapacity(comp.link_inputs.len);
+ positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(.{ .path = key.status.success.object_path });
+ try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(.{ .path = path });
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.include_compiler_rt) {
- try positionals.append(.{ .path = comp.compiler_rt_obj.?.full_object_path });
+ try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
}
- for (positionals.items) |obj| {
- macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
- diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
+ for (positionals.items) |link_input| {
+ macho_file.classifyInputFile(link_input) catch |err|
+ diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
@@ -745,20 +745,15 @@ fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
try macho_file.base.file.?.pwriteAll(mem.asBytes(&header), 0);
}
+const std = @import("std");
+const Path = std.Build.Cache.Path;
+const WaitGroup = std.Thread.WaitGroup;
const assert = std.debug.assert;
-const build_options = @import("build_options");
-const eh_frame = @import("eh_frame.zig");
-const fat = @import("fat.zig");
-const link = @import("../../link.zig");
-const load_commands = @import("load_commands.zig");
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const state_log = std.log.scoped(.link_state);
-const std = @import("std");
-const trace = @import("../../tracy.zig").trace;
-const Path = std.Build.Cache.Path;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
@@ -767,3 +762,9 @@ const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
+const build_options = @import("build_options");
+const eh_frame = @import("eh_frame.zig");
+const fat = @import("fat.zig");
+const link = @import("../../link.zig");
+const load_commands = @import("load_commands.zig");
+const trace = @import("../../tracy.zig").trace;
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index df4131b7fe..f291d6c2d6 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -637,14 +637,6 @@ fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !
return loc;
}
-fn parseInputFiles(wasm: *Wasm, files: []const []const u8) !void {
- for (files) |path| {
- if (try wasm.parseObjectFile(path)) continue;
- if (try wasm.parseArchive(path, false)) continue; // load archives lazily
- log.warn("Unexpected file format at path: '{s}'", .{path});
- }
-}
-
/// Parses the object file from given path. Returns true when the given file was an object
/// file and parsed successfully. Returns false when file is not an object file.
/// May return an error instead when parsing failed.
@@ -2522,7 +2514,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
// Positional arguments to the linker such as object files and static archives.
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
var positionals = std.ArrayList([]const u8).init(arena);
- try positionals.ensureUnusedCapacity(comp.objects.len);
+ try positionals.ensureUnusedCapacity(comp.link_inputs.len);
const target = comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
@@ -2566,9 +2558,12 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
try positionals.append(path);
}
- for (comp.objects) |object| {
- try positionals.append(try object.path.toString(arena));
- }
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .object, .archive => |obj| try positionals.append(try obj.path.toString(arena)),
+ .dso => |dso| try positionals.append(try dso.path.toString(arena)),
+ .dso_exact => unreachable, // forbidden by frontend
+ .res => unreachable, // windows only
+ };
for (comp.c_object_table.keys()) |c_object| {
try positionals.append(try c_object.status.success.object_path.toString(arena));
@@ -2577,7 +2572,11 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (comp.compiler_rt_lib) |lib| try positionals.append(try lib.full_object_path.toString(arena));
if (comp.compiler_rt_obj) |obj| try positionals.append(try obj.full_object_path.toString(arena));
- try wasm.parseInputFiles(positionals.items);
+ for (positionals.items) |path| {
+ if (try wasm.parseObjectFile(path)) continue;
+ if (try wasm.parseArchive(path, false)) continue; // load archives lazily
+ log.warn("Unexpected file format at path: '{s}'", .{path});
+ }
if (wasm.zig_object_index != .null) {
try wasm.resolveSymbolsInObject(wasm.zig_object_index);
@@ -3401,10 +3400,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
comptime assert(Compilation.link_hash_implementation_version == 14);
- for (comp.objects) |obj| {
- _ = try man.addFilePath(obj.path, null);
- man.hash.add(obj.must_link);
- }
+ try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@@ -3458,8 +3454,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
- if (comp.objects.len != 0)
- break :blk comp.objects[0].path;
+ if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
@@ -3621,16 +3616,23 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
// Positional arguments to the linker such as object files.
var whole_archive = false;
- for (comp.objects) |obj| {
- if (obj.must_link and !whole_archive) {
- try argv.append("-whole-archive");
- whole_archive = true;
- } else if (!obj.must_link and whole_archive) {
- try argv.append("-no-whole-archive");
- whole_archive = false;
- }
- try argv.append(try obj.path.toString(arena));
- }
+ for (comp.link_inputs) |link_input| switch (link_input) {
+ .object, .archive => |obj| {
+ if (obj.must_link and !whole_archive) {
+ try argv.append("-whole-archive");
+ whole_archive = true;
+ } else if (!obj.must_link and whole_archive) {
+ try argv.append("-no-whole-archive");
+ whole_archive = false;
+ }
+ try argv.append(try obj.path.toString(arena));
+ },
+ .dso => |dso| {
+ try argv.append(try dso.path.toString(arena));
+ },
+ .dso_exact => unreachable,
+ .res => unreachable,
+ };
if (whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
@@ -3643,11 +3645,8 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
try argv.append(p);
}
- if (comp.config.output_mode != .Obj and
- !comp.skip_linker_dependencies and
- !comp.config.link_libc)
- {
- try argv.append(try comp.libc_static_lib.?.full_object_path.toString(arena));
+ if (comp.libc_static_lib) |crt_file| {
+ try argv.append(try crt_file.full_object_path.toString(arena));
}
if (compiler_rt_path) |p| {
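
The loop above brackets must-link inputs between -whole-archive and -no-whole-archive so wasm-ld loads all of their members, and closes the bracket when a non-must-link input (or the end of the list) is reached. The same toggle, extracted into a standalone helper over a made-up Item type purely for illustration:

    const std = @import("std");

    const Item = struct { path: []const u8, must_link: bool };

    fn appendPositionals(argv: *std.ArrayList([]const u8), items: []const Item) !void {
        var whole_archive = false;
        for (items) |item| {
            if (item.must_link and !whole_archive) {
                try argv.append("-whole-archive");
                whole_archive = true;
            } else if (!item.must_link and whole_archive) {
                try argv.append("-no-whole-archive");
                whole_archive = false;
            }
            try argv.append(item.path);
        }
        // Close a trailing bracket so later libraries are not force-loaded.
        if (whole_archive) try argv.append("-no-whole-archive");
    }
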
diff --git a/src/main.zig b/src/main.zig
index 058de72442..af26e7159b 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -15,6 +15,7 @@ const cleanExit = std.process.cleanExit;
const native_os = builtin.os.tag;
const Cache = std.Build.Cache;
const Path = std.Build.Cache.Path;
+const Directory = std.Build.Cache.Directory;
const EnvVar = std.zig.EnvVar;
const LibCInstallation = std.zig.LibCInstallation;
const AstGen = std.zig.AstGen;
@@ -55,7 +56,7 @@ pub fn wasi_cwd() std.os.wasi.fd_t {
return cwd_fd;
}
-fn getWasiPreopen(name: []const u8) Compilation.Directory {
+fn getWasiPreopen(name: []const u8) Directory {
return .{
.path = name,
.handle = .{
@@ -555,6 +556,8 @@ const usage_build_generic =
\\ -fno-each-lib-rpath Prevent adding rpath for each used dynamic library
\\ -fallow-shlib-undefined Allows undefined symbols in shared libraries
\\ -fno-allow-shlib-undefined Disallows undefined symbols in shared libraries
+ \\ -fallow-so-scripts Allows .so files to be GNU ld scripts
+ \\ -fno-allow-so-scripts (default) .so files must be ELF files
\\ --build-id[=style] At a minor link-time expense, coordinates stripped binaries
\\ fast, uuid, sha1, md5 with debug symbols via a '.note.gnu.build-id' section
\\ 0x[hexstring] Maximum 32 bytes
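
As a usage illustration (library name and search path are hypothetical), the new flag pair rides alongside the existing search-path options. Per the text above, the default -fno-allow-so-scripts requires .so files to be ELF files, while -fallow-so-scripts additionally permits them to be GNU ld scripts; since the flag value in effect when an input is parsed is what gets recorded for that input, it is placed before -lfoo here:

    zig build-exe main.zig -fallow-so-scripts -L/opt/vendor/lib -lfoo
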
@@ -766,27 +769,6 @@ const ArgsIterator = struct {
}
};
-/// In contrast to `link.SystemLib`, this stores arguments that may need to be
-/// resolved into static libraries so that we can pass only dynamic libraries
-/// as system libs to `Compilation`.
-const SystemLib = struct {
- needed: bool,
- weak: bool,
-
- preferred_mode: std.builtin.LinkMode,
- search_strategy: SearchStrategy,
-
- const SearchStrategy = enum { paths_first, mode_first, no_fallback };
-
- fn fallbackMode(this: SystemLib) std.builtin.LinkMode {
- assert(this.search_strategy != .no_fallback);
- return switch (this.preferred_mode) {
- .dynamic => .static,
- .static => .dynamic,
- };
- }
-};
-
/// Similar to `link.Framework` except it doesn't store yet unresolved
/// path to the framework.
const Framework = struct {
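
The deleted fallbackMode helper encoded a simple swap: when the preferred link mode cannot be found and the strategy allows a fallback, try the other mode. For reference, a standalone sketch of that swap (the actual resolution now happens in link.resolveInputs, which is outside this diff):

    const std = @import("std");

    // Sketch of the removed SystemLib.fallbackMode: preferring one link
    // mode implies the other one is the fallback.
    fn fallbackMode(preferred_mode: std.builtin.LinkMode) std.builtin.LinkMode {
        return switch (preferred_mode) {
            .dynamic => .static,
            .static => .dynamic,
        };
    }

    test fallbackMode {
        try std.testing.expectEqual(std.builtin.LinkMode.static, fallbackMode(.dynamic));
        try std.testing.expectEqual(std.builtin.LinkMode.dynamic, fallbackMode(.static));
    }
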
@@ -867,6 +849,7 @@ fn buildOutputType(
var linker_gc_sections: ?bool = null;
var linker_compress_debug_sections: ?link.File.Elf.CompressDebugSections = null;
var linker_allow_shlib_undefined: ?bool = null;
+ var allow_so_scripts: bool = false;
var linker_bind_global_refs_locally: ?bool = null;
var linker_import_symbols: bool = false;
var linker_import_table: bool = false;
@@ -919,7 +902,7 @@ fn buildOutputType(
var hash_style: link.File.Elf.HashStyle = .both;
var entitlements: ?[]const u8 = null;
var pagezero_size: ?u64 = null;
- var lib_search_strategy: SystemLib.SearchStrategy = .paths_first;
+ var lib_search_strategy: link.UnresolvedInput.SearchStrategy = .paths_first;
var lib_preferred_mode: std.builtin.LinkMode = .dynamic;
var headerpad_size: ?u32 = null;
var headerpad_max_install_names: bool = false;
@@ -983,8 +966,10 @@ fn buildOutputType(
// Populated in the call to `createModule` for the root module.
.resolved_options = undefined,
- .system_libs = .{},
- .resolved_system_libs = .{},
+ .cli_link_inputs = .empty,
+ .windows_libs = .empty,
+ .link_inputs = .empty,
+
.wasi_emulated_libs = .{},
.c_source_files = .{},
@@ -992,7 +977,7 @@ fn buildOutputType(
.llvm_m_args = .{},
.sysroot = null,
- .lib_dirs = .{}, // populated by createModule()
+ .lib_directories = .{}, // populated by createModule()
.lib_dir_args = .{}, // populated from CLI arg parsing
.libc_installation = null,
.want_native_include_dirs = false,
@@ -1001,7 +986,6 @@ fn buildOutputType(
.rpath_list = .{},
.each_lib_rpath = null,
.libc_paths_file = try EnvVar.ZIG_LIBC.get(arena),
- .link_objects = .{},
.native_system_include_paths = &.{},
};
@@ -1237,30 +1221,42 @@ fn buildOutputType(
// We don't know whether this library is part of libc
// or libc++ until we resolve the target, so we append
// to the list for now.
- try create_module.system_libs.put(arena, args_iter.nextOrFatal(), .{
- .needed = false,
- .weak = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = args_iter.nextOrFatal(),
+ .query = .{
+ .needed = false,
+ .weak = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.eql(u8, arg, "--needed-library") or
mem.eql(u8, arg, "-needed-l") or
mem.eql(u8, arg, "-needed_library"))
{
const next_arg = args_iter.nextOrFatal();
- try create_module.system_libs.put(arena, next_arg, .{
- .needed = true,
- .weak = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = next_arg,
+ .query = .{
+ .needed = true,
+ .weak = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.eql(u8, arg, "-weak_library") or mem.eql(u8, arg, "-weak-l")) {
- try create_module.system_libs.put(arena, args_iter.nextOrFatal(), .{
- .needed = false,
- .weak = true,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = args_iter.nextOrFatal(),
+ .query = .{
+ .needed = false,
+ .weak = true,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.eql(u8, arg, "-D")) {
try cc_argv.appendSlice(arena, &.{ arg, args_iter.nextOrFatal() });
} else if (mem.eql(u8, arg, "-I")) {
@@ -1573,6 +1569,10 @@ fn buildOutputType(
linker_allow_shlib_undefined = true;
} else if (mem.eql(u8, arg, "-fno-allow-shlib-undefined")) {
linker_allow_shlib_undefined = false;
+ } else if (mem.eql(u8, arg, "-fallow-so-scripts")) {
+ allow_so_scripts = true;
+ } else if (mem.eql(u8, arg, "-fno-allow-so-scripts")) {
+ allow_so_scripts = false;
} else if (mem.eql(u8, arg, "-z")) {
const z_arg = args_iter.nextOrFatal();
if (mem.eql(u8, z_arg, "nodelete")) {
@@ -1680,26 +1680,38 @@ fn buildOutputType(
// We don't know whether this library is part of libc
// or libc++ until we resolve the target, so we append
// to the list for now.
- try create_module.system_libs.put(arena, arg["-l".len..], .{
- .needed = false,
- .weak = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = arg["-l".len..],
+ .query = .{
+ .needed = false,
+ .weak = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.startsWith(u8, arg, "-needed-l")) {
- try create_module.system_libs.put(arena, arg["-needed-l".len..], .{
- .needed = true,
- .weak = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = arg["-needed-l".len..],
+ .query = .{
+ .needed = true,
+ .weak = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.startsWith(u8, arg, "-weak-l")) {
- try create_module.system_libs.put(arena, arg["-weak-l".len..], .{
- .needed = false,
- .weak = true,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = arg["-weak-l".len..],
+ .query = .{
+ .needed = false,
+ .weak = true,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.startsWith(u8, arg, "-D")) {
try cc_argv.append(arena, arg);
} else if (mem.startsWith(u8, arg, "-I")) {
@@ -1724,15 +1736,28 @@ fn buildOutputType(
fatal("unrecognized parameter: '{s}'", .{arg});
}
} else switch (file_ext orelse Compilation.classifyFileExt(arg)) {
- .shared_library => {
- try create_module.link_objects.append(arena, .{ .path = Path.initCwd(arg) });
- create_module.opts.any_dyn_libs = true;
- },
- .object, .static_library => {
- try create_module.link_objects.append(arena, .{ .path = Path.initCwd(arg) });
+ .shared_library, .object, .static_library => {
+ try create_module.cli_link_inputs.append(arena, .{ .path_query = .{
+ .path = Path.initCwd(arg),
+ .query = .{
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
+ // We do not set `any_dyn_libs` yet because a .so file
+ // may actually resolve to a GNU ld script which ends
+ // up being a static library.
},
.res => {
- try create_module.link_objects.append(arena, .{ .path = Path.initCwd(arg) });
+ try create_module.cli_link_inputs.append(arena, .{ .path_query = .{
+ .path = Path.initCwd(arg),
+ .query = .{
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
contains_res_file = true;
},
.manifest => {
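
The comment about any_dyn_libs above is the crux of this hunk: a file named libfoo.so can be a plain-text GNU ld script rather than an ELF shared object, so classification by extension alone is not enough. One minimal, standalone way to tell the two apart is to sniff the magic bytes (illustrative only; the real work done by link.resolveInputs is far more involved and also applies the allow_so_scripts policy):

    const std = @import("std");

    // Illustrative only: an ELF shared object starts with "\x7fELF",
    // whereas a GNU ld script is plain text.
    fn looksLikeElf(bytes: []const u8) bool {
        return bytes.len >= 4 and std.mem.eql(u8, bytes[0..4], "\x7fELF");
    }

    pub fn main() !void {
        const args = try std.process.argsAlloc(std.heap.page_allocator);
        defer std.process.argsFree(std.heap.page_allocator, args);
        if (args.len < 2) return error.MissingPath;

        var buf: [4]u8 = undefined;
        const file = try std.fs.cwd().openFile(args[1], .{});
        defer file.close();
        const n = try file.readAll(&buf);

        if (looksLikeElf(buf[0..n])) {
            std.debug.print("{s}: ELF file\n", .{args[1]});
        } else {
            std.debug.print("{s}: not ELF (possibly a GNU ld script)\n", .{args[1]});
        }
    }
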
@@ -1785,6 +1810,7 @@ fn buildOutputType(
// some functionality that depend on it, such as C++ exceptions and
// DWARF-based stack traces.
link_eh_frame_hdr = true;
+ allow_so_scripts = true;
const COutMode = enum {
link,
@@ -1844,24 +1870,32 @@ fn buildOutputType(
.ext = file_ext, // duped while parsing the args.
});
},
- .shared_library => {
- try create_module.link_objects.append(arena, .{
+ .unknown, .object, .static_library, .shared_library => {
+ try create_module.cli_link_inputs.append(arena, .{ .path_query = .{
.path = Path.initCwd(it.only_arg),
- .must_link = must_link,
- });
- create_module.opts.any_dyn_libs = true;
- },
- .unknown, .object, .static_library => {
- try create_module.link_objects.append(arena, .{
- .path = Path.initCwd(it.only_arg),
- .must_link = must_link,
- });
+ .query = .{
+ .must_link = must_link,
+ .needed = needed,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
+ // We do not set `any_dyn_libs` yet because a .so file
+ // may actually resolve to a GNU ld script which ends
+ // up being a static library.
},
.res => {
- try create_module.link_objects.append(arena, .{
+ try create_module.cli_link_inputs.append(arena, .{ .path_query = .{
.path = Path.initCwd(it.only_arg),
- .must_link = must_link,
- });
+ .query = .{
+ .must_link = must_link,
+ .needed = needed,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
contains_res_file = true;
},
.manifest => {
@@ -1893,19 +1927,21 @@ fn buildOutputType(
// -l :path/to/filename is used when callers need
// more control over what's in the resulting
// binary: no extra rpaths and DSO filename exactly
- // as provided. Hello, Go.
- try create_module.link_objects.append(arena, .{
- .path = Path.initCwd(it.only_arg),
- .must_link = must_link,
- .loption = true,
- });
+ // as provided. CGo compilation depends on this.
+ try create_module.cli_link_inputs.append(arena, .{ .dso_exact = .{
+ .name = it.only_arg,
+ } });
} else {
- try create_module.system_libs.put(arena, it.only_arg, .{
- .needed = needed,
- .weak = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = it.only_arg,
+ .query = .{
+ .needed = needed,
+ .weak = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
}
},
.ignore => {},
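
The `-l :name` form now maps to a dedicated dso_exact input instead of a generic link object, so the linker receives the DSO reference exactly as written, with no path resolution or extra rpaths, while a plain -l still becomes a name_query. Illustrative invocations (library names hypothetical):

    zig cc main.c -l:libbar.so.5    # recorded as .dso_exact and passed through verbatim
    zig cc main.c -lbar             # recorded as .name_query and resolved against -L paths
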
@@ -2174,12 +2210,16 @@ fn buildOutputType(
},
.force_load_objc => force_load_objc = true,
.mingw_unicode_entry_point => mingw_unicode_entry_point = true,
- .weak_library => try create_module.system_libs.put(arena, it.only_arg, .{
- .needed = false,
- .weak = true,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- }),
+ .weak_library => try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = it.only_arg,
+ .query = .{
+ .needed = false,
+ .weak = true,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } }),
.weak_framework => try create_module.frameworks.put(arena, it.only_arg, .{ .weak = true }),
.headerpad_max_install_names => headerpad_max_install_names = true,
.compress_debug_sections => {
@@ -2482,26 +2522,38 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-needed_framework")) {
try create_module.frameworks.put(arena, linker_args_it.nextOrFatal(), .{ .needed = true });
} else if (mem.eql(u8, arg, "-needed_library")) {
- try create_module.system_libs.put(arena, linker_args_it.nextOrFatal(), .{
- .weak = false,
- .needed = true,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = linker_args_it.nextOrFatal(),
+ .query = .{
+ .weak = false,
+ .needed = true,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.startsWith(u8, arg, "-weak-l")) {
- try create_module.system_libs.put(arena, arg["-weak-l".len..], .{
- .weak = true,
- .needed = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = arg["-weak-l".len..],
+ .query = .{
+ .weak = true,
+ .needed = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.eql(u8, arg, "-weak_library")) {
- try create_module.system_libs.put(arena, linker_args_it.nextOrFatal(), .{
- .weak = true,
- .needed = false,
- .preferred_mode = lib_preferred_mode,
- .search_strategy = lib_search_strategy,
- });
+ try create_module.cli_link_inputs.append(arena, .{ .name_query = .{
+ .name = linker_args_it.nextOrFatal(),
+ .query = .{
+ .weak = true,
+ .needed = false,
+ .preferred_mode = lib_preferred_mode,
+ .search_strategy = lib_search_strategy,
+ .allow_so_scripts = allow_so_scripts,
+ },
+ } });
} else if (mem.eql(u8, arg, "-compatibility_version")) {
const compat_version = linker_args_it.nextOrFatal();
compatibility_version = std.SemanticVersion.parse(compat_version) catch |err| {
@@ -2532,10 +2584,14 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-install_name")) {
install_name = linker_args_it.nextOrFatal();
} else if (mem.eql(u8, arg, "-force_load")) {
- try create_module.link_objects.append(arena, .{
+ try create_module.cli_link_inputs.append(arena, .{ .path_query = .{
.path = Path.initCwd(linker_args_it.nextOrFatal()),
- .must_link = true,
- });
+ .query = .{
+ .must_link = true,
+ .preferred_mode = .static,
+ .search_strategy = .no_fallback,
+ },
+ } });
} else if (mem.eql(u8, arg, "-hash-style") or
mem.eql(u8, arg, "--hash-style"))
{
@@ -2665,7 +2721,7 @@ fn buildOutputType(
},
}
if (create_module.c_source_files.items.len == 0 and
- create_module.link_objects.items.len == 0 and
+ !anyObjectLinkInputs(create_module.cli_link_inputs.items) and
root_src_file == null)
{
// For example `zig cc` and no args should print the "no input files" message.
@@ -2707,8 +2763,11 @@ fn buildOutputType(
if (create_module.c_source_files.items.len >= 1)
break :b create_module.c_source_files.items[0].src_path;
- if (create_module.link_objects.items.len >= 1)
- break :b create_module.link_objects.items[0].path.sub_path;
+ for (create_module.cli_link_inputs.items) |unresolved_link_input| switch (unresolved_link_input) {
+ // Intentionally includes dynamic libraries provided by file path.
+ .path_query => |pq| break :b pq.path.sub_path,
+ else => continue,
+ };
if (emit_bin == .yes)
break :b emit_bin.yes;
@@ -2794,7 +2853,7 @@ fn buildOutputType(
fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
};
- var zig_lib_directory: Compilation.Directory = d: {
+ var zig_lib_directory: Directory = d: {
if (override_lib_dir) |unresolved_lib_dir| {
const lib_dir = try introspect.resolvePath(arena, unresolved_lib_dir);
break :d .{
@@ -2815,7 +2874,7 @@ fn buildOutputType(
};
defer zig_lib_directory.handle.close();
- var global_cache_directory: Compilation.Directory = l: {
+ var global_cache_directory: Directory = l: {
if (override_global_cache_dir) |p| {
break :l .{
.handle = try fs.cwd().makeOpenPath(p, .{}),
@@ -2845,7 +2904,7 @@ fn buildOutputType(
var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .empty;
// `builtin_modules` allocated into `arena`, so no deinit
- const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules);
+ const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules, color);
for (create_module.modules.keys(), create_module.modules.values()) |key, cli_mod| {
if (cli_mod.resolved == null)
fatal("module '{s}' declared but not used", .{key});
@@ -2939,7 +2998,6 @@ fn buildOutputType(
}
}
- // We now repeat part of the process for frameworks.
var resolved_frameworks = std.ArrayList(Compilation.Framework).init(arena);
if (create_module.frameworks.keys().len > 0) {
@@ -2996,7 +3054,7 @@ fn buildOutputType(
const total_obj_count = create_module.c_source_files.items.len +
@intFromBool(root_src_file != null) +
create_module.rc_source_files.items.len +
- create_module.link_objects.items.len;
+ link.countObjectInputs(create_module.link_inputs.items);
if (total_obj_count > 1) {
fatal("{s} does not support linking multiple objects into one", .{@tagName(target.ofmt)});
}
@@ -3212,7 +3270,7 @@ fn buildOutputType(
var cleanup_local_cache_dir: ?fs.Dir = null;
defer if (cleanup_local_cache_dir) |*dir| dir.close();
- var local_cache_directory: Compilation.Directory = l: {
+ var local_cache_directory: Directory = l: {
if (override_local_cache_dir) |local_cache_dir_path| {
const dir = try fs.cwd().makeOpenPath(local_cache_dir_path, .{});
cleanup_local_cache_dir = dir;
@@ -3349,7 +3407,7 @@ fn buildOutputType(
.emit_llvm_bc = emit_llvm_bc_resolved.data,
.emit_docs = emit_docs_resolved.data,
.emit_implib = emit_implib_resolved.data,
- .lib_dirs = create_module.lib_dirs.items,
+ .lib_directories = create_module.lib_directories.items,
.rpath_list = create_module.rpath_list.items,
.symbol_wrap_set = symbol_wrap_set,
.c_source_files = create_module.c_source_files.items,
@@ -3357,11 +3415,10 @@ fn buildOutputType(
.manifest_file = manifest_file,
.rc_includes = rc_includes,
.mingw_unicode_entry_point = mingw_unicode_entry_point,
- .link_objects = create_module.link_objects.items,
+ .link_inputs = create_module.link_inputs.items,
.framework_dirs = create_module.framework_dirs.items,
.frameworks = resolved_frameworks.items,
- .system_lib_names = create_module.resolved_system_libs.items(.name),
- .system_lib_infos = create_module.resolved_system_libs.items(.lib),
+ .windows_lib_names = create_module.windows_libs.keys(),
.wasi_emulated_libs = create_module.wasi_emulated_libs.items,
.want_compiler_rt = want_compiler_rt,
.hash_style = hash_style,
@@ -3630,12 +3687,14 @@ const CreateModule = struct {
/// This one is used while collecting CLI options. The set of libs is used
/// directly after computing the target and used to compute link_libc,
/// link_libcpp, and then the libraries are filtered into
- /// `external_system_libs` and `resolved_system_libs`.
- system_libs: std.StringArrayHashMapUnmanaged(SystemLib),
- resolved_system_libs: std.MultiArrayList(struct {
- name: []const u8,
- lib: Compilation.SystemLib,
- }),
+ /// `unresolved_link_inputs` and `windows_libs`.
+ cli_link_inputs: std.ArrayListUnmanaged(link.UnresolvedInput),
+ windows_libs: std.StringArrayHashMapUnmanaged(void),
+ /// The local variable `unresolved_link_inputs` is fed into library
+ /// resolution, mutating the input array, and producing this data as
+ /// output. Allocated with gpa.
+ link_inputs: std.ArrayListUnmanaged(link.Input),
+
wasi_emulated_libs: std.ArrayListUnmanaged(wasi_libc.CrtFile),
c_source_files: std.ArrayListUnmanaged(Compilation.CSourceFile),
@@ -3646,7 +3705,7 @@ const CreateModule = struct {
/// CPU features.
llvm_m_args: std.ArrayListUnmanaged([]const u8),
sysroot: ?[]const u8,
- lib_dirs: std.ArrayListUnmanaged([]const u8),
+ lib_directories: std.ArrayListUnmanaged(Directory),
lib_dir_args: std.ArrayListUnmanaged([]const u8),
libc_installation: ?LibCInstallation,
want_native_include_dirs: bool,
@@ -3656,7 +3715,6 @@ const CreateModule = struct {
rpath_list: std.ArrayListUnmanaged([]const u8),
each_lib_rpath: ?bool,
libc_paths_file: ?[]const u8,
- link_objects: std.ArrayListUnmanaged(Compilation.LinkObject),
};
fn createModule(
@@ -3667,6 +3725,7 @@ fn createModule(
parent: ?*Package.Module,
zig_lib_directory: Cache.Directory,
builtin_modules: *std.StringHashMapUnmanaged(*Package.Module),
+ color: std.zig.Color,
) Allocator.Error!*Package.Module {
const cli_mod = &create_module.modules.values()[index];
if (cli_mod.resolved) |m| return m;
@@ -3760,85 +3819,83 @@ fn createModule(
// First, remove libc, libc++, and compiler_rt libraries from the system libraries list.
// We need to know whether the set of system libraries contains anything besides these
// to decide whether to trigger native path detection logic.
- var external_system_libs: std.MultiArrayList(struct {
- name: []const u8,
- info: SystemLib,
- }) = .{};
- for (create_module.system_libs.keys(), create_module.system_libs.values()) |lib_name, info| {
- if (std.zig.target.isLibCLibName(target, lib_name)) {
- create_module.opts.link_libc = true;
- continue;
- }
- if (std.zig.target.isLibCxxLibName(target, lib_name)) {
- create_module.opts.link_libcpp = true;
- continue;
- }
- switch (target_util.classifyCompilerRtLibName(target, lib_name)) {
- .none => {},
- .only_libunwind, .both => {
- create_module.opts.link_libunwind = true;
+ // Preserves linker input order.
+ var unresolved_link_inputs: std.ArrayListUnmanaged(link.UnresolvedInput) = .empty;
+ try unresolved_link_inputs.ensureUnusedCapacity(arena, create_module.cli_link_inputs.items.len);
+ var any_name_queries_remaining = false;
+ for (create_module.cli_link_inputs.items) |cli_link_input| switch (cli_link_input) {
+ .name_query => |nq| {
+ const lib_name = nq.name;
+ if (std.zig.target.isLibCLibName(target, lib_name)) {
+ create_module.opts.link_libc = true;
continue;
- },
- .only_compiler_rt => {
- warn("ignoring superfluous library '{s}': this dependency is fulfilled instead by compiler-rt which zig unconditionally provides", .{lib_name});
+ }
+ if (std.zig.target.isLibCxxLibName(target, lib_name)) {
+ create_module.opts.link_libcpp = true;
continue;
- },
- }
+ }
+ switch (target_util.classifyCompilerRtLibName(target, lib_name)) {
+ .none => {},
+ .only_libunwind, .both => {
+ create_module.opts.link_libunwind = true;
+ continue;
+ },
+ .only_compiler_rt => {
+ warn("ignoring superfluous library '{s}': this dependency is fulfilled instead by compiler-rt which zig unconditionally provides", .{lib_name});
+ continue;
+ },
+ }
- if (target.isMinGW()) {
- const exists = mingw.libExists(arena, target, zig_lib_directory, lib_name) catch |err| {
- fatal("failed to check zig installation for DLL import libs: {s}", .{
- @errorName(err),
- });
- };
- if (exists) {
- try create_module.resolved_system_libs.append(arena, .{
- .name = lib_name,
- .lib = .{
- .needed = true,
- .weak = false,
- .path = null,
- },
- });
- continue;
+ if (target.isMinGW()) {
+ const exists = mingw.libExists(arena, target, zig_lib_directory, lib_name) catch |err| {
+ fatal("failed to check zig installation for DLL import libs: {s}", .{
+ @errorName(err),
+ });
+ };
+ if (exists) {
+ try create_module.windows_libs.put(arena, lib_name, {});
+ continue;
+ }
}
- }
- if (fs.path.isAbsolute(lib_name)) {
- fatal("cannot use absolute path as a system library: {s}", .{lib_name});
- }
+ if (fs.path.isAbsolute(lib_name)) {
+ fatal("cannot use absolute path as a system library: {s}", .{lib_name});
+ }
- if (target.os.tag == .wasi) {
- if (wasi_libc.getEmulatedLibCrtFile(lib_name)) |crt_file| {
- try create_module.wasi_emulated_libs.append(arena, crt_file);
- continue;
+ if (target.os.tag == .wasi) {
+ if (wasi_libc.getEmulatedLibCrtFile(lib_name)) |crt_file| {
+ try create_module.wasi_emulated_libs.append(arena, crt_file);
+ continue;
+ }
}
- }
+ unresolved_link_inputs.appendAssumeCapacity(cli_link_input);
+ any_name_queries_remaining = true;
+ },
+ else => {
+ unresolved_link_inputs.appendAssumeCapacity(cli_link_input);
+ },
+ }; // After this point, unresolved_link_inputs is used instead of cli_link_inputs.
- try external_system_libs.append(arena, .{
- .name = lib_name,
- .info = info,
- });
- }
- // After this point, external_system_libs is used instead of system_libs.
- if (external_system_libs.len != 0)
- create_module.want_native_include_dirs = true;
+ if (any_name_queries_remaining) create_module.want_native_include_dirs = true;
// Resolve the library path arguments with respect to sysroot.
+ try create_module.lib_directories.ensureUnusedCapacity(arena, create_module.lib_dir_args.items.len);
if (create_module.sysroot) |root| {
- try create_module.lib_dirs.ensureUnusedCapacity(arena, create_module.lib_dir_args.items.len * 2);
- for (create_module.lib_dir_args.items) |dir| {
- if (fs.path.isAbsolute(dir)) {
- const stripped_dir = dir[fs.path.diskDesignator(dir).len..];
+ for (create_module.lib_dir_args.items) |lib_dir_arg| {
+ if (fs.path.isAbsolute(lib_dir_arg)) {
+ const stripped_dir = lib_dir_arg[fs.path.diskDesignator(lib_dir_arg).len..];
const full_path = try fs.path.join(arena, &[_][]const u8{ root, stripped_dir });
- create_module.lib_dirs.appendAssumeCapacity(full_path);
+ addLibDirectoryWarn(&create_module.lib_directories, full_path);
+ } else {
+ addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
}
- create_module.lib_dirs.appendAssumeCapacity(dir);
}
} else {
- create_module.lib_dirs = create_module.lib_dir_args;
+ for (create_module.lib_dir_args.items) |lib_dir_arg| {
+ addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
+ }
}
- create_module.lib_dir_args = undefined; // From here we use lib_dirs instead.
+ create_module.lib_dir_args = undefined; // From here we use lib_directories instead.
if (resolved_target.is_native_os and target.isDarwin()) {
// If we want to link against frameworks, we need system headers.
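
The loop above classifies each -l name before any filesystem search: libc and libc++ names just flip module options, compiler-rt/libunwind names fold into what zig already provides, and only the remainder stays in unresolved_link_inputs as name queries. A standalone sketch of the first classification step, using the host target (illustrative; the compiler consults the resolved cross-compilation target and also handles MinGW import libs and WASI emulated libs):

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() void {
        const target = builtin.target;
        const names = [_][]const u8{ "m", "pthread", "c++", "foo" };
        for (names) |name| {
            if (std.zig.target.isLibCLibName(target, name)) {
                std.debug.print("-l{s}: folded into link_libc\n", .{name});
            } else if (std.zig.target.isLibCxxLibName(target, name)) {
                std.debug.print("-l{s}: folded into link_libcpp\n", .{name});
            } else {
                std.debug.print("-l{s}: kept as a name_query for resolveInputs\n", .{name});
            }
        }
    }
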
@@ -3847,7 +3904,10 @@ fn createModule(
}
if (create_module.each_lib_rpath orelse resolved_target.is_native_os) {
- try create_module.rpath_list.appendSlice(arena, create_module.lib_dirs.items);
+ try create_module.rpath_list.ensureUnusedCapacity(arena, create_module.lib_directories.items.len);
+ for (create_module.lib_directories.items) |lib_directory| {
+ create_module.rpath_list.appendAssumeCapacity(lib_directory.path.?);
+ }
}
// Trigger native system library path detection if necessary.
@@ -3865,8 +3925,10 @@ fn createModule(
create_module.native_system_include_paths = try paths.include_dirs.toOwnedSlice(arena);
try create_module.framework_dirs.appendSlice(arena, paths.framework_dirs.items);
- try create_module.lib_dirs.appendSlice(arena, paths.lib_dirs.items);
try create_module.rpath_list.appendSlice(arena, paths.rpaths.items);
+
+ try create_module.lib_directories.ensureUnusedCapacity(arena, paths.lib_dirs.items.len);
+ for (paths.lib_dirs.items) |path| addLibDirectoryWarn(&create_module.lib_directories, path);
}
if (create_module.libc_paths_file) |paths_file| {
@@ -3878,7 +3940,7 @@ fn createModule(
}
if (builtin.target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and
- external_system_libs.len != 0)
+ any_name_queries_remaining)
{
if (create_module.libc_installation == null) {
create_module.libc_installation = LibCInstallation.findNative(.{
@@ -3889,181 +3951,31 @@ fn createModule(
fatal("unable to find native libc installation: {s}", .{@errorName(err)});
};
- try create_module.lib_dirs.appendSlice(arena, &.{
- create_module.libc_installation.?.msvc_lib_dir.?,
- create_module.libc_installation.?.kernel32_lib_dir.?,
- });
- }
- }
-
- // If any libs in this list are statically provided, we omit them from the
- // resolved list and populate the link_objects array instead.
- {
- var test_path = std.ArrayList(u8).init(gpa);
- defer test_path.deinit();
-
- var checked_paths = std.ArrayList(u8).init(gpa);
- defer checked_paths.deinit();
-
- var failed_libs = std.ArrayList(struct {
- name: []const u8,
- strategy: SystemLib.SearchStrategy,
- checked_paths: []const u8,
- preferred_mode: std.builtin.LinkMode,
- }).init(arena);
-
- syslib: for (external_system_libs.items(.name), external_system_libs.items(.info)) |lib_name, info| {
- // Checked in the first pass above while looking for libc libraries.
- assert(!fs.path.isAbsolute(lib_name));
-
- checked_paths.clearRetainingCapacity();
-
- switch (info.search_strategy) {
- .mode_first, .no_fallback => {
- // check for preferred mode
- for (create_module.lib_dirs.items) |lib_dir_path| {
- if (try accessLibPath(
- &test_path,
- &checked_paths,
- lib_dir_path,
- lib_name,
- target,
- info.preferred_mode,
- )) {
- const path = Path.initCwd(try arena.dupe(u8, test_path.items));
- switch (info.preferred_mode) {
- .static => try create_module.link_objects.append(arena, .{ .path = path }),
- .dynamic => try create_module.resolved_system_libs.append(arena, .{
- .name = lib_name,
- .lib = .{
- .needed = info.needed,
- .weak = info.weak,
- .path = path,
- },
- }),
- }
- continue :syslib;
- }
- }
- // check for fallback mode
- if (info.search_strategy == .no_fallback) {
- try failed_libs.append(.{
- .name = lib_name,
- .strategy = info.search_strategy,
- .checked_paths = try arena.dupe(u8, checked_paths.items),
- .preferred_mode = info.preferred_mode,
- });
- continue :syslib;
- }
- for (create_module.lib_dirs.items) |lib_dir_path| {
- if (try accessLibPath(
- &test_path,
- &checked_paths,
- lib_dir_path,
- lib_name,
- target,
- info.fallbackMode(),
- )) {
- const path = Path.initCwd(try arena.dupe(u8, test_path.items));
- switch (info.fallbackMode()) {
- .static => try create_module.link_objects.append(arena, .{ .path = path }),
- .dynamic => try create_module.resolved_system_libs.append(arena, .{
- .name = lib_name,
- .lib = .{
- .needed = info.needed,
- .weak = info.weak,
- .path = path,
- },
- }),
- }
- continue :syslib;
- }
- }
- try failed_libs.append(.{
- .name = lib_name,
- .strategy = info.search_strategy,
- .checked_paths = try arena.dupe(u8, checked_paths.items),
- .preferred_mode = info.preferred_mode,
- });
- continue :syslib;
- },
- .paths_first => {
- for (create_module.lib_dirs.items) |lib_dir_path| {
- // check for preferred mode
- if (try accessLibPath(
- &test_path,
- &checked_paths,
- lib_dir_path,
- lib_name,
- target,
- info.preferred_mode,
- )) {
- const path = Path.initCwd(try arena.dupe(u8, test_path.items));
- switch (info.preferred_mode) {
- .static => try create_module.link_objects.append(arena, .{ .path = path }),
- .dynamic => try create_module.resolved_system_libs.append(arena, .{
- .name = lib_name,
- .lib = .{
- .needed = info.needed,
- .weak = info.weak,
- .path = path,
- },
- }),
- }
- continue :syslib;
- }
-
- // check for fallback mode
- if (try accessLibPath(
- &test_path,
- &checked_paths,
- lib_dir_path,
- lib_name,
- target,
- info.fallbackMode(),
- )) {
- const path = Path.initCwd(try arena.dupe(u8, test_path.items));
- switch (info.fallbackMode()) {
- .static => try create_module.link_objects.append(arena, .{ .path = path }),
- .dynamic => try create_module.resolved_system_libs.append(arena, .{
- .name = lib_name,
- .lib = .{
- .needed = info.needed,
- .weak = info.weak,
- .path = path,
- },
- }),
- }
- continue :syslib;
- }
- }
- try failed_libs.append(.{
- .name = lib_name,
- .strategy = info.search_strategy,
- .checked_paths = try arena.dupe(u8, checked_paths.items),
- .preferred_mode = info.preferred_mode,
- });
- continue :syslib;
- },
- }
- @compileError("unreachable");
- }
-
- if (failed_libs.items.len > 0) {
- for (failed_libs.items) |f| {
- const searched_paths = if (f.checked_paths.len == 0) " none" else f.checked_paths;
- std.log.err("unable to find {s} system library '{s}' using strategy '{s}'. searched paths:{s}", .{
- @tagName(f.preferred_mode), f.name, @tagName(f.strategy), searched_paths,
- });
- }
- process.exit(1);
+ try create_module.lib_directories.ensureUnusedCapacity(arena, 2);
+ addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?);
+ addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?);
}
}
- // After this point, create_module.resolved_system_libs is used instead of
- // create_module.external_system_libs.
- if (create_module.resolved_system_libs.len != 0)
- create_module.opts.any_dyn_libs = true;
+ // Destructively mutates but does not transfer ownership of `unresolved_link_inputs`.
+ link.resolveInputs(
+ gpa,
+ arena,
+ target,
+ &unresolved_link_inputs,
+ &create_module.link_inputs,
+ create_module.lib_directories.items,
+ color,
+ ) catch |err| fatal("failed to resolve link inputs: {s}", .{@errorName(err)});
+
+ if (create_module.windows_libs.count() != 0) create_module.opts.any_dyn_libs = true;
+ if (!create_module.opts.any_dyn_libs) for (create_module.link_inputs.items) |item| switch (item) {
+ .dso, .dso_exact => {
+ create_module.opts.any_dyn_libs = true;
+ break;
+ },
+ else => {},
+ };
create_module.resolved_options = Compilation.Config.resolve(create_module.opts) catch |err| switch (err) {
error.WasiExecModelRequiresWasi => fatal("only WASI OS targets support execution model", .{}),
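
After resolution, any_dyn_libs is derived from the resolved inputs rather than from a separate system-libs list: a single pass stops at the first dynamic input. The for-over-switch-with-break shape used above, in a toy form (the enum here is a stand-in, not the compiler's link.Input union):

    const std = @import("std");

    // Minimal stand-in for the scan above: walk resolved inputs once and
    // flip a flag as soon as a dynamic input is seen.
    const Input = enum { object, archive, dso, dso_exact, res };

    pub fn main() void {
        const inputs = [_]Input{ .object, .archive, .dso };
        var any_dyn_libs = false;
        for (inputs) |input| switch (input) {
            .dso, .dso_exact => {
                any_dyn_libs = true;
                break;
            },
            else => {},
        };
        std.debug.print("any_dyn_libs = {}\n", .{any_dyn_libs});
    }
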
@@ -4131,7 +4043,7 @@ fn createModule(
for (cli_mod.deps) |dep| {
const dep_index = create_module.modules.getIndex(dep.value) orelse
fatal("module '{s}' depends on non-existent module '{s}'", .{ name, dep.key });
- const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory, builtin_modules);
+ const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory, builtin_modules, color);
try mod.deps.put(arena, dep.key, dep_mod);
}
@@ -4996,7 +4908,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
process.raiseFileDescriptorLimit();
- var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
+ var zig_lib_directory: Directory = if (override_lib_dir) |lib_dir| .{
.path = lib_dir,
.handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) });
@@ -5015,7 +4927,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path;
- var global_cache_directory: Compilation.Directory = l: {
+ var global_cache_directory: Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
.handle = try fs.cwd().makeOpenPath(p, .{}),
@@ -5026,7 +4938,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
child_argv.items[argv_index_global_cache_dir] = global_cache_directory.path orelse cwd_path;
- var local_cache_directory: Compilation.Directory = l: {
+ var local_cache_directory: Directory = l: {
if (override_local_cache_dir) |local_cache_dir_path| {
break :l .{
.handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}),
@@ -5460,7 +5372,7 @@ fn jitCmd(
const override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena);
const override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena);
- var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
+ var zig_lib_directory: Directory = if (override_lib_dir) |lib_dir| .{
.path = lib_dir,
.handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) });
@@ -5470,7 +5382,7 @@ fn jitCmd(
};
defer zig_lib_directory.handle.close();
- var global_cache_directory: Compilation.Directory = l: {
+ var global_cache_directory: Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
.handle = try fs.cwd().makeOpenPath(p, .{}),
@@ -6857,86 +6769,6 @@ const ClangSearchSanitizer = struct {
};
};
-fn accessLibPath(
- test_path: *std.ArrayList(u8),
- checked_paths: *std.ArrayList(u8),
- lib_dir_path: []const u8,
- lib_name: []const u8,
- target: std.Target,
- link_mode: std.builtin.LinkMode,
-) !bool {
- const sep = fs.path.sep_str;
-
- if (target.isDarwin() and link_mode == .dynamic) tbd: {
- // Prefer .tbd over .dylib.
- test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "lib{s}.tbd", .{ lib_dir_path, lib_name });
- try checked_paths.writer().print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
- error.FileNotFound => break :tbd,
- else => |e| fatal("unable to search for tbd library '{s}': {s}", .{
- test_path.items, @errorName(e),
- }),
- };
- return true;
- }
-
- main_check: {
- test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "{s}{s}{s}", .{
- lib_dir_path,
- target.libPrefix(),
- lib_name,
- switch (link_mode) {
- .static => target.staticLibSuffix(),
- .dynamic => target.dynamicLibSuffix(),
- },
- });
- try checked_paths.writer().print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
- error.FileNotFound => break :main_check,
- else => |e| fatal("unable to search for {s} library '{s}': {s}", .{
- @tagName(link_mode), test_path.items, @errorName(e),
- }),
- };
- return true;
- }
-
- // In the case of Darwin, the main check will be .dylib, so here we
- // additionally check for .so files.
- if (target.isDarwin() and link_mode == .dynamic) so: {
- test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "lib{s}.so", .{ lib_dir_path, lib_name });
- try checked_paths.writer().print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
- error.FileNotFound => break :so,
- else => |e| fatal("unable to search for so library '{s}': {s}", .{
- test_path.items, @errorName(e),
- }),
- };
- return true;
- }
-
- // In the case of MinGW, the main check will be .lib but we also need to
- // look for `libfoo.a`.
- if (target.isMinGW() and link_mode == .static) mingw: {
- test_path.clearRetainingCapacity();
- try test_path.writer().print("{s}" ++ sep ++ "lib{s}.a", .{
- lib_dir_path, lib_name,
- });
- try checked_paths.writer().print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
- error.FileNotFound => break :mingw,
- else => |e| fatal("unable to search for static library '{s}': {s}", .{
- test_path.items, @errorName(e),
- }),
- };
- return true;
- }
-
- return false;
-}
-
fn accessFrameworkPath(
test_path: *std.ArrayList(u8),
checked_paths: *std.ArrayList(u8),
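
The deleted accessLibPath probed each -L directory for "<libPrefix><name><staticLibSuffix|dynamicLibSuffix>" (plus the Darwin .tbd/.so and MinGW libfoo.a special cases); that probing is now handled during link.resolveInputs. A minimal sketch of the candidate names it was building, for a hypothetical -L/usr/lib -lfoo on the host target:

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() !void {
        const gpa = std.heap.page_allocator;
        const target = builtin.target;
        const lib_dir = "/usr/lib"; // hypothetical -L directory
        const name = "foo"; // hypothetical -lfoo

        const static_candidate = try std.fmt.allocPrint(gpa, "{s}/{s}{s}{s}", .{
            lib_dir, target.libPrefix(), name, target.staticLibSuffix(),
        });
        defer gpa.free(static_candidate);
        const dynamic_candidate = try std.fmt.allocPrint(gpa, "{s}/{s}{s}{s}", .{
            lib_dir, target.libPrefix(), name, target.dynamicLibSuffix(),
        });
        defer gpa.free(dynamic_candidate);

        // e.g. /usr/lib/libfoo.a and /usr/lib/libfoo.so on Linux
        std.debug.print("static:  {s}\ndynamic: {s}\n", .{ static_candidate, dynamic_candidate });
    }
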
@@ -7057,7 +6889,7 @@ fn cmdFetch(
});
defer root_prog_node.end();
- var global_cache_directory: Compilation.Directory = l: {
+ var global_cache_directory: Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
.handle = try fs.cwd().makeOpenPath(p, .{}),
@@ -7634,3 +7466,24 @@ fn handleModArg(
c_source_files_owner_index.* = create_module.c_source_files.items.len;
rc_source_files_owner_index.* = create_module.rc_source_files.items.len;
}
+
+fn anyObjectLinkInputs(link_inputs: []const link.UnresolvedInput) bool {
+ for (link_inputs) |link_input| switch (link_input) {
+ .path_query => |pq| switch (Compilation.classifyFileExt(pq.path.sub_path)) {
+ .object, .static_library, .res => return true,
+ else => continue,
+ },
+ else => continue,
+ };
+ return false;
+}
+
+fn addLibDirectoryWarn(lib_directories: *std.ArrayListUnmanaged(Directory), path: []const u8) void {
+ lib_directories.appendAssumeCapacity(.{
+ .handle = fs.cwd().openDir(path, .{}) catch |err| {
+ warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) });
+ return;
+ },
+ .path = path,
+ });
+}
diff --git a/src/musl.zig b/src/musl.zig
index 48717dc5a1..975929a59d 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -19,7 +19,7 @@ pub const CrtFile = enum {
libc_so,
};
-pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progress.Node) !void {
+pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -28,7 +28,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- switch (crt_file) {
+ switch (in_crt_file) {
.crti_o => {
var args = std.ArrayList([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
@@ -195,8 +195,9 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
.libc_so => {
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
+ const output_mode: std.builtin.OutputMode = .Lib;
const config = try Compilation.Config.resolve(.{
- .output_mode = .Lib,
+ .output_mode = output_mode,
.link_mode = .dynamic,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
@@ -276,28 +277,39 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try comp.updateSubCompilation(sub_compilation, .@"musl libc.so", prog_node);
- try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
-
const basename = try comp.gpa.dupe(u8, "libc.so");
errdefer comp.gpa.free(basename);
- comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile());
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
+ comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
+ }
},
}
}
-// Return true if musl has arch-specific crti/crtn sources.
-// See lib/libc/musl/crt/ARCH/crt?.s .
+/// Return true if musl has arch-specific crti/crtn sources.
+/// See lib/libc/musl/crt/ARCH/crt?.s .
pub fn needsCrtiCrtn(target: std.Target) bool {
- // zig fmt: off
return switch (target.cpu.arch) {
- .riscv32,
- .riscv64,
- .wasm32, .wasm64 => false,
+ .riscv32, .riscv64, .wasm32, .wasm64 => false,
.loongarch64 => false,
else => true,
};
- // zig fmt: on
+}
+
+pub fn needsCrt0(output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, pie: bool) ?CrtFile {
+ return switch (output_mode) {
+ .Obj, .Lib => null,
+ .Exe => switch (link_mode) {
+ .dynamic => if (pie) .scrt1_o else .crt1_o,
+ .static => if (pie) .rcrt1_o else .crt1_o,
+ },
+ };
}
fn isMuslArchName(name: []const u8) bool {
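
needsCrt0 makes the musl startup-object choice explicit. Purely as an illustration of the table it encodes (the enum is re-declared locally so the sketch is self-contained; the compiler uses musl.CrtFile):

    const std = @import("std");

    const CrtFile = enum { crt1_o, rcrt1_o, scrt1_o };

    // Same decision table as musl.needsCrt0 above.
    fn needsCrt0(output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, pie: bool) ?CrtFile {
        return switch (output_mode) {
            .Obj, .Lib => null,
            .Exe => switch (link_mode) {
                .dynamic => if (pie) .scrt1_o else .crt1_o,
                .static => if (pie) .rcrt1_o else .crt1_o,
            },
        };
    }

    fn nameOf(crt: ?CrtFile) []const u8 {
        return if (crt) |c| @tagName(c) else "(none)";
    }

    pub fn main() void {
        std.debug.print("dynamic pie exe -> {s}\n", .{nameOf(needsCrt0(.Exe, .dynamic, true))});
        std.debug.print("dynamic     exe -> {s}\n", .{nameOf(needsCrt0(.Exe, .dynamic, false))});
        std.debug.print("static  pie exe -> {s}\n", .{nameOf(needsCrt0(.Exe, .static, true))});
        std.debug.print("static      exe -> {s}\n", .{nameOf(needsCrt0(.Exe, .static, false))});
        std.debug.print("shared library  -> {s}\n", .{nameOf(needsCrt0(.Lib, .dynamic, false))});
    }
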
diff --git a/src/target.zig b/src/target.zig
index 7d30781f43..ec34a23f0d 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -1,4 +1,6 @@
const std = @import("std");
+const assert = std.debug.assert;
+
const Type = @import("Type.zig");
const AddressSpace = std.builtin.AddressSpace;
const Alignment = @import("InternPool.zig").Alignment;
@@ -284,40 +286,18 @@ pub fn hasRedZone(target: std.Target) bool {
pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
// The linking order of these is significant and should match the order other
// c compilers such as gcc or clang use.
- return switch (target.os.tag) {
- .netbsd, .openbsd => &[_][]const u8{
- "-lm",
- "-lpthread",
- "-lc",
- "-lutil",
- },
- .solaris, .illumos => &[_][]const u8{
- "-lm",
- "-lsocket",
- "-lnsl",
- // Solaris releases after 10 merged the threading libraries into libc.
- "-lc",
- },
- .haiku => &[_][]const u8{
- "-lm",
- "-lroot",
- "-lpthread",
- "-lc",
- "-lnetwork",
- },
- else => if (target.isAndroid() or target.abi.isOpenHarmony()) &[_][]const u8{
- "-lm",
- "-lc",
- "-ldl",
- } else &[_][]const u8{
- "-lm",
- "-lpthread",
- "-lc",
- "-ldl",
- "-lrt",
- "-lutil",
+ const result: []const []const u8 = switch (target.os.tag) {
+ .netbsd, .openbsd => &.{ "-lm", "-lpthread", "-lc", "-lutil" },
+ // Solaris releases after 10 merged the threading libraries into libc.
+ .solaris, .illumos => &.{ "-lm", "-lsocket", "-lnsl", "-lc" },
+ .haiku => &.{ "-lm", "-lroot", "-lpthread", "-lc", "-lnetwork" },
+ .linux => switch (target.abi) {
+ .android, .androideabi, .ohos, .ohoseabi => &.{ "-lm", "-lc", "-ldl" },
+ else => &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
},
+ else => &.{},
};
+ return result;
}
pub fn clangMightShellOutForAssembly(target: std.Target) bool {
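
Read off the rewritten switch, the libc link flags differ per OS and, on Linux, per ABI; OSes not listed now fall through to an empty set. A few examples taken directly from the table above:

    x86_64-linux-gnu:       -lm -lpthread -lc -ldl -lrt -lutil
    aarch64-linux-android:  -lm -lc -ldl
    x86_64-openbsd:         -lm -lpthread -lc -lutil
    x86_64-solaris:         -lm -lsocket -lnsl -lc
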