| Field | Value | Date |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2020-12-20 21:19:05 -0500 |
| committer | GitHub <noreply@github.com> | 2020-12-20 21:19:05 -0500 |
| commit | 4918605176c2e48c178847ea281b790334207733 (patch) | |
| tree | 4adb4cbf5e6764df65747fb3bae2c1640060eb89 /src/Compilation.zig | |
| parent | 4964bb3282bf13de03a79fad1fb9bca104dc1930 (diff) | |
| parent | 1d94a6893689ad1fb8e06308ae51603a6c8708a8 (diff) | |
| download | zig-4918605176c2e48c178847ea281b790334207733.tar.gz zig-4918605176c2e48c178847ea281b790334207733.zip | |
Merge pull request #7462 from ziglang/parallel-c-objects
Introduce a ThreadPool and parallel execution of some of the compilation work items
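The shape this PR introduces is a simple fan-out/join: C-object jobs move out of the serial `work_queue` into a dedicated `c_object_work_queue`, each item is handed to a `ThreadPool` worker, and a `WaitGroup` keeps `performAllTheWork` from returning until every worker has finished, with a `mutex` guarding the `Compilation` state the workers share. Below is a minimal, self-contained sketch of that pattern using the standard library's `std.Thread.Pool` and `std.Thread.WaitGroup` on a recent Zig (0.11+); this is an illustration only, not the PR's code — the PR uses the compiler-internal `ThreadPool.zig` and `WaitGroup.zig`, whose API differs slightly (`wg.stop()` rather than `finish()`, `mutex.acquire()`/`release()` rather than `lock()`/`unlock()`), and the worker body here is a made-up stand-in.

```zig
const std = @import("std");

// Shared mutable state guarded by a mutex, standing in for fields like
// Compilation.failed_c_objects that several workers may touch at once.
var total: u64 = 0;
var total_mutex: std.Thread.Mutex = .{};

// One unit of work, standing in for workerUpdateCObject: do the expensive
// part without the lock, then take the lock only to publish the result.
fn worker(item: u64, wg: *std.Thread.WaitGroup) void {
    defer wg.finish(); // the PR's internal WaitGroup calls this stop()
    const result = item * item; // stand-in for "compile one C object"
    total_mutex.lock();
    defer total_mutex.unlock();
    total += result;
}

pub fn main() !void {
    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = std.heap.page_allocator });
    defer pool.deinit();

    var wg: std.Thread.WaitGroup = .{};

    // Fan out: one pool job per queued item, like draining c_object_work_queue.
    for ([_]u64{ 1, 2, 3, 4, 5 }) |item| {
        wg.start();
        try pool.spawn(worker, .{ item, &wg });
    }

    // Join: block until every job has signalled completion.
    wg.wait();
    std.debug.print("total = {d}\n", .{total});
}
```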
Diffstat (limited to 'src/Compilation.zig')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/Compilation.zig | 115 |
1 file changed, 85 insertions, 30 deletions
```diff
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 5e95575642..23f67f5b37 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -26,6 +26,8 @@ const Module = @import("Module.zig");
 const Cache = @import("Cache.zig");
 const stage1 = @import("stage1.zig");
 const translate_c = @import("translate_c.zig");
+const ThreadPool = @import("ThreadPool.zig");
+const WaitGroup = @import("WaitGroup.zig");
 
 /// General-purpose allocator. Used for both temporary and long-term storage.
 gpa: *Allocator,
@@ -41,7 +43,12 @@ link_error_flags: link.File.ErrorFlags = .{},
 
 work_queue: std.fifo.LinearFifo(Job, .Dynamic),
 
+/// These jobs are to invoke the Clang compiler to create an object file, which
+/// gets linked with the Compilation.
+c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
+
 /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
+/// This data is accessed by multiple threads and is protected by `mutex`.
 failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *ErrorMsg) = .{},
 
 keep_source_files_loaded: bool,
@@ -74,6 +81,7 @@ zig_lib_directory: Directory,
 local_cache_directory: Directory,
 global_cache_directory: Directory,
 libc_include_dir_list: []const []const u8,
+thread_pool: *ThreadPool,
 
 /// Populated when we build the libc++ static library. A Job to build this is placed in the queue
 /// and resolved before calling linker.flush().
@@ -111,6 +119,9 @@ owned_link_dir: ?std.fs.Dir,
 /// Don't use this for anything other than stage1 compatibility.
 color: @import("main.zig").Color = .auto,
 
+/// This mutex guards all `Compilation` mutable state.
+mutex: std.Mutex = .{},
+
 test_filter: ?[]const u8,
 test_name_prefix: ?[]const u8,
 test_evented_io: bool,
@@ -150,9 +161,6 @@ const Job = union(enum) {
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
     update_line_number: *Module.Decl,
-    /// Invoke the Clang compiler to create an object file, which gets linked
-    /// with the Compilation.
-    c_object: *CObject,
 
     /// one of the glibc static objects
     glibc_crt_file: glibc.CRTFile,
@@ -330,6 +338,7 @@ pub const InitOptions = struct {
     root_name: []const u8,
     root_pkg: ?*Package,
     output_mode: std.builtin.OutputMode,
+    thread_pool: *ThreadPool,
     dynamic_linker: ?[]const u8 = null,
     /// `null` means to not emit a binary file.
     emit_bin: ?EmitLoc,
@@ -971,6 +980,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
             .emit_analysis = options.emit_analysis,
             .emit_docs = options.emit_docs,
             .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
+            .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
             .keep_source_files_loaded = options.keep_source_files_loaded,
             .use_clang = use_clang,
             .clang_argv = options.clang_argv,
@@ -979,6 +989,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
             .self_exe_path = options.self_exe_path,
             .libc_include_dir_list = libc_dirs.libc_include_dir_list,
             .sanitize_c = sanitize_c,
+            .thread_pool = options.thread_pool,
             .clang_passthrough_mode = options.clang_passthrough_mode,
             .clang_preprocessor_mode = options.clang_preprocessor_mode,
             .verbose_cc = options.verbose_cc,
@@ -1190,11 +1201,13 @@ pub fn update(self: *Compilation) !void {
     const tracy = trace(@src());
     defer tracy.end();
 
+    self.c_object_cache_digest_set.clearRetainingCapacity();
+
     // For compiling C objects, we rely on the cache hash system to avoid duplicating work.
     // Add a Job for each C object.
-    try self.work_queue.ensureUnusedCapacity(self.c_object_table.items().len);
+    try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.items().len);
     for (self.c_object_table.items()) |entry| {
-        self.work_queue.writeItemAssumeCapacity(.{ .c_object = entry.key });
+        self.c_object_work_queue.writeItemAssumeCapacity(entry.key);
     }
 
     const use_stage1 = build_options.is_stage1 and self.bin_file.options.use_llvm;
@@ -1365,13 +1378,23 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
 
 pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void {
     var progress: std.Progress = .{};
-    var main_progress_node = try progress.start("", null);
+    var main_progress_node = try progress.start("", 0);
     defer main_progress_node.end();
     if (self.color == .off) progress.terminal = null;
 
     var c_comp_progress_node = main_progress_node.start("Compile C Objects", self.c_source_files.len);
     defer c_comp_progress_node.end();
 
+    var wg = WaitGroup{};
+    defer wg.wait();
+
+    while (self.c_object_work_queue.readItem()) |c_object| {
+        wg.start();
+        try self.thread_pool.spawn(workerUpdateCObject, .{
+            self, c_object, &c_comp_progress_node, &wg,
+        });
+    }
+
     while (self.work_queue.readItem()) |work_item| switch (work_item) {
         .codegen_decl => |decl| switch (decl.analysis) {
             .unreferenced => unreachable,
@@ -1447,21 +1470,6 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                 decl.analysis = .codegen_failure_retryable;
             };
         },
-        .c_object => |c_object| {
-            self.updateCObject(c_object, &c_comp_progress_node) catch |err| switch (err) {
-                error.AnalysisFail => continue,
-                else => {
-                    try self.failed_c_objects.ensureCapacity(self.gpa, self.failed_c_objects.items().len + 1);
-                    self.failed_c_objects.putAssumeCapacityNoClobber(c_object, try ErrorMsg.create(
-                        self.gpa,
-                        0,
-                        "unable to build C object: {s}",
-                        .{@errorName(err)},
-                    ));
-                    c_object.status = .{ .failure = {} };
-                },
-            };
-        },
         .glibc_crt_file => |crt_file| {
             glibc.buildCRTFile(self, crt_file) catch |err| {
                 // TODO Expose this as a normal compile error rather than crashing here.
@@ -1553,7 +1561,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
    };
 }
 
-pub fn obtainCObjectCacheManifest(comp: *Compilation) Cache.Manifest {
+pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
     var man = comp.cache_parent.obtain();
 
     // Only things that need to be added on top of the base hash, and only things
@@ -1708,6 +1716,37 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
     };
 }
 
+fn workerUpdateCObject(
+    comp: *Compilation,
+    c_object: *CObject,
+    progress_node: *std.Progress.Node,
+    wg: *WaitGroup,
+) void {
+    defer wg.stop();
+
+    comp.updateCObject(c_object, progress_node) catch |err| switch (err) {
+        error.AnalysisFail => return,
+        else => {
+            {
+                const lock = comp.mutex.acquire();
+                defer lock.release();
+                comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1) catch {
+                    fatal("TODO handle this by setting c_object.status = oom failure", .{});
+                };
+                comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, ErrorMsg.create(
+                    comp.gpa,
+                    0,
+                    "unable to build C object: {s}",
+                    .{@errorName(err)},
+                ) catch {
+                    fatal("TODO handle this by setting c_object.status = oom failure", .{});
+                });
+            }
+            c_object.status = .{ .failure = {} };
+        },
+    };
+}
+
 fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *std.Progress.Node) !void {
     if (!build_options.have_llvm) {
         return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{});
     }
@@ -1720,6 +1759,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *
 
     if (c_object.clearStatus(comp.gpa)) {
         // There was previous failure.
+        const lock = comp.mutex.acquire();
+        defer lock.release();
         comp.failed_c_objects.removeAssertDiscard(c_object);
     }
 
@@ -1747,8 +1788,16 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *
     }
 
     {
-        const gop = try comp.c_object_cache_digest_set.getOrPut(comp.gpa, man.hash.peekBin());
-        if (gop.found_existing) {
+        const is_collision = blk: {
+            const bin_digest = man.hash.peekBin();
+
+            const lock = comp.mutex.acquire();
+            defer lock.release();
+
+            const gop = try comp.c_object_cache_digest_set.getOrPut(comp.gpa, bin_digest);
+            break :blk gop.found_existing;
+        };
+        if (is_collision) {
            return comp.failCObj(
                c_object,
                "the same source file was already added to the same compilation with the same flags",
@@ -1764,7 +1813,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_comp_progress_node: *
     const c_source_basename = std.fs.path.basename(c_object.src.src_path);
 
     c_comp_progress_node.activate();
-    var child_progress_node = c_comp_progress_node.start(c_source_basename, null);
+    var child_progress_node = c_comp_progress_node.start(c_source_basename, 0);
     child_progress_node.activate();
     defer child_progress_node.end();
 
@@ -1929,7 +1978,7 @@ pub fn addTranslateCCArgs(
 
 /// Add common C compiler args between translate-c and C object compilation.
 pub fn addCCArgs(
-    comp: *Compilation,
+    comp: *const Compilation,
     arena: *Allocator,
     argv: *std.ArrayList([]const u8),
     ext: FileExt,
@@ -2164,10 +2213,14 @@ fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8,
 
 fn failCObjWithOwnedErrorMsg(comp: *Compilation, c_object: *CObject, err_msg: *ErrorMsg) InnerError {
     {
-        errdefer err_msg.destroy(comp.gpa);
-        try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1);
+        const lock = comp.mutex.acquire();
+        defer lock.release();
+        {
+            errdefer err_msg.destroy(comp.gpa);
+            try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1);
+        }
+        comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
     }
-    comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
     c_object.status = .failure;
     return error.AnalysisFail;
 }
@@ -2324,7 +2377,7 @@ test "classifyFileExt" {
     std.testing.expectEqual(FileExt.zir, classifyFileExt("foo.zir"));
 }
 
-fn haveFramePointer(comp: *Compilation) bool {
+fn haveFramePointer(comp: *const Compilation) bool {
     // If you complicate this logic make sure you update the parent cache hash.
     // Right now it's not in the cache hash because the value depends on optimize_mode
     // and strip which are both already part of the hash.
@@ -2775,6 +2828,7 @@ fn buildOutputFromZig(
        .root_name = root_name,
        .root_pkg = &root_pkg,
        .output_mode = fixed_output_mode,
+        .thread_pool = comp.thread_pool,
        .libc_installation = comp.bin_file.options.libc_installation,
        .emit_bin = emit_bin,
        .optimize_mode = optimize_mode,
@@ -3148,6 +3202,7 @@ pub fn build_crt_file(
        .root_name = root_name,
        .root_pkg = null,
        .output_mode = output_mode,
+        .thread_pool = comp.thread_pool,
        .libc_installation = comp.bin_file.options.libc_installation,
        .emit_bin = emit_bin,
        .optimize_mode = comp.bin_file.options.optimize_mode,
```
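One subtle part of the diff is the `is_collision` block in `updateCObject`: once several workers hash C objects concurrently, the "have we already seen this cache digest?" check and the insertion into `c_object_cache_digest_set` must happen under the same lock, otherwise two identical jobs could both observe "not present" and race past the duplicate-source error. Below is a hedged stand-alone sketch of that check-and-insert-under-one-lock pattern; the `DigestSet` type, the `u64` digest, and the use of `std.Thread.Mutex` with `std.AutoHashMapUnmanaged` on a recent Zig std are illustrative assumptions, not the compiler's actual code, which keeps the real set in `c_object_cache_digest_set` guarded by `Compilation.mutex`.

```zig
const std = @import("std");

/// Tracks digests that some worker has already claimed. Doing getOrPut while
/// holding the mutex makes the check and the insert one atomic step, which is
/// what the `is_collision` block in updateCObject relies on.
const DigestSet = struct {
    mutex: std.Thread.Mutex = .{},
    seen: std.AutoHashMapUnmanaged(u64, void) = .{},

    /// Returns true if the digest was already registered (a collision).
    fn register(self: *DigestSet, gpa: std.mem.Allocator, digest: u64) !bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        const gop = try self.seen.getOrPut(gpa, digest);
        return gop.found_existing;
    }
};

test "the second registration of a digest reports a collision" {
    var set: DigestSet = .{};
    defer set.seen.deinit(std.testing.allocator);

    try std.testing.expect(!try set.register(std.testing.allocator, 0xdead_beef));
    try std.testing.expect(try set.register(std.testing.allocator, 0xdead_beef));
}
```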
