Diffstat (limited to 'lib/std/Build')
-rw-r--r--  lib/std/Build/Cache.zig                |  135
-rw-r--r--  lib/std/Build/CheckFileStep.zig        |   87
-rw-r--r--  lib/std/Build/CheckObjectStep.zig      |  261
-rw-r--r--  lib/std/Build/CompileStep.zig          |  842
-rw-r--r--  lib/std/Build/ConfigHeaderStep.zig     |  155
-rw-r--r--  lib/std/Build/EmulatableRunStep.zig    |  213
-rw-r--r--  lib/std/Build/FmtStep.zig              |   85
-rw-r--r--  lib/std/Build/InstallArtifactStep.zig  |  104
-rw-r--r--  lib/std/Build/InstallDirStep.zig       |   69
-rw-r--r--  lib/std/Build/InstallFileStep.zig      |   51
-rw-r--r--  lib/std/Build/LogStep.zig              |   23
-rw-r--r--  lib/std/Build/ObjCopyStep.zig          |   46
-rw-r--r--  lib/std/Build/OptionsStep.zig          |  133
-rw-r--r--  lib/std/Build/RemoveDirStep.zig        |   35
-rw-r--r--  lib/std/Build/RunStep.zig              | 1247
-rw-r--r--  lib/std/Build/Step.zig                 |  494
-rw-r--r--  lib/std/Build/TranslateCStep.zig       |   52
-rw-r--r--  lib/std/Build/WriteFileStep.zig        |  212
18 files changed, 2793 insertions, 1451 deletions
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index d4dbe6ec14..b25e349168 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -7,27 +7,27 @@ pub const Directory = struct {
/// directly, but it is needed when passing the directory to a child process.
/// `null` means cwd.
path: ?[]const u8,
- handle: std.fs.Dir,
+ handle: fs.Dir,
pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
+ const part2 = try fs.path.join(allocator, paths);
defer allocator.free(part2);
- return std.fs.path.join(allocator, &[_][]const u8{ p, part2 });
+ return fs.path.join(allocator, &[_][]const u8{ p, part2 });
} else {
- return std.fs.path.join(allocator, paths);
+ return fs.path.join(allocator, paths);
}
}
pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
+ const part2 = try fs.path.join(allocator, paths);
defer allocator.free(part2);
- return std.fs.path.joinZ(allocator, &[_][]const u8{ p, part2 });
+ return fs.path.joinZ(allocator, &[_][]const u8{ p, part2 });
} else {
- return std.fs.path.joinZ(allocator, paths);
+ return fs.path.joinZ(allocator, paths);
}
}
@@ -39,6 +39,20 @@ pub const Directory = struct {
if (self.path) |p| gpa.free(p);
self.* = undefined;
}
+
+ pub fn format(
+ self: Directory,
+ comptime fmt_string: []const u8,
+ options: fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ _ = options;
+ if (fmt_string.len != 0) fmt.invalidFmtError(fmt, self);
+ if (self.path) |p| {
+ try writer.writeAll(p);
+ try writer.writeAll(fs.path.sep_str);
+ }
+ }
};
gpa: Allocator,
@@ -243,10 +257,10 @@ pub const HashHelper = struct {
hh.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&out_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
return out_digest;
}
@@ -365,10 +379,10 @@ pub const Manifest = struct {
var bin_digest: BinDigest = undefined;
self.hash.hasher.final(&bin_digest);
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&self.hex_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
self.hash.hasher = hasher_init;
@@ -408,7 +422,11 @@ pub const Manifest = struct {
self.have_exclusive_lock = true;
return false; // cache miss; exclusive lock already held
} else |err| switch (err) {
- error.WouldBlock => continue,
+ // There are no dir components, so you would think
+ // that this was unreachable, however we have
+ // observed on macOS two processes racing to do
+ // openat() with O_CREAT manifest in ENOENT.
+ error.WouldBlock, error.FileNotFound => continue,
else => |e| return e,
}
},
@@ -425,7 +443,10 @@ pub const Manifest = struct {
self.manifest_file = manifest_file;
self.have_exclusive_lock = true;
} else |err| switch (err) {
- error.WouldBlock => {
+ // There are no dir components, so you would think that this was
+ // unreachable, however we have observed on macOS two processes racing
+ // to do openat() with O_CREAT manifest in ENOENT.
+ error.WouldBlock, error.FileNotFound => {
self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
.lock = .Shared,
});
@@ -469,7 +490,7 @@ pub const Manifest = struct {
cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
- _ = std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
+ _ = fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat;
if (prefix >= self.cache.prefixes_len) return error.InvalidFormat;
@@ -806,10 +827,10 @@ pub const Manifest = struct {
self.hash.hasher.final(&bin_digest);
var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&out_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
+ .{fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
return out_digest;
@@ -831,10 +852,10 @@ pub const Manifest = struct {
var encoded_digest: [hex_digest_len]u8 = undefined;
for (self.files.items) |file| {
- _ = std.fmt.bufPrint(
+ _ = fmt.bufPrint(
&encoded_digest,
"{s}",
- .{std.fmt.fmtSliceHexLower(&file.bin_digest)},
+ .{fmt.fmtSliceHexLower(&file.bin_digest)},
) catch unreachable;
try writer.print("{d} {d} {d} {s} {d} {s}\n", .{
file.stat.size,
@@ -955,16 +976,16 @@ fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) !void {
}
// Create/Write a file, close it, then grab its stat.mtime timestamp.
-fn testGetCurrentFileTimestamp() !i128 {
+fn testGetCurrentFileTimestamp(dir: fs.Dir) !i128 {
const test_out_file = "test-filetimestamp.tmp";
- var file = try fs.cwd().createFile(test_out_file, .{
+ var file = try dir.createFile(test_out_file, .{
.read = true,
.truncate = true,
});
defer {
file.close();
- fs.cwd().deleteFile(test_out_file) catch {};
+ dir.deleteFile(test_out_file) catch {};
}
return (try file.stat()).mtime;
@@ -976,16 +997,17 @@ test "cache file and then recall it" {
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
- try cwd.writeFile(temp_file, "Hello, world!\n");
+ try tmp.dir.writeFile(temp_file, "Hello, world!\n");
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -995,9 +1017,9 @@ test "cache file and then recall it" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1033,9 +1055,6 @@ test "cache file and then recall it" {
try testing.expectEqual(digest1, digest2);
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file);
}
test "check that changing a file makes cache fail" {
@@ -1043,21 +1062,19 @@ test "check that changing a file makes cache fail" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file = "cache_hash_change_file_test.txt";
const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
-
- try cwd.writeFile(temp_file, original_temp_file_contents);
+ try tmp.dir.writeFile(temp_file, original_temp_file_contents);
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -1067,9 +1084,9 @@ test "check that changing a file makes cache fail" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1089,7 +1106,7 @@ test "check that changing a file makes cache fail" {
try ch.writeManifest();
}
- try cwd.writeFile(temp_file, updated_temp_file_contents);
+ try tmp.dir.writeFile(temp_file, updated_temp_file_contents);
{
var ch = cache.obtain();
@@ -1111,9 +1128,6 @@ test "check that changing a file makes cache fail" {
try testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
}
test "no file inputs" {
@@ -1121,18 +1135,20 @@ test "no file inputs" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
+
const temp_manifest_dir = "no_file_inputs_manifest_dir";
- defer cwd.deleteTree(temp_manifest_dir) catch {};
var digest1: [hex_digest_len]u8 = undefined;
var digest2: [hex_digest_len]u8 = undefined;
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1167,18 +1183,19 @@ test "Manifest with files added after initial hash work" {
// https://github.com/ziglang/zig/issues/5437
return error.SkipZigTest;
}
- const cwd = fs.cwd();
+ var tmp = testing.tmpDir(.{});
+ defer tmp.cleanup();
const temp_file1 = "cache_hash_post_file_test1.txt";
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
- try cwd.writeFile(temp_file1, "Hello, world!\n");
- try cwd.writeFile(temp_file2, "Hello world the second!\n");
+ try tmp.dir.writeFile(temp_file1, "Hello, world!\n");
+ try tmp.dir.writeFile(temp_file2, "Hello world the second!\n");
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
+ const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
}
@@ -1189,9 +1206,9 @@ test "Manifest with files added after initial hash work" {
{
var cache = Cache{
.gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = tmp.dir });
defer cache.manifest_dir.close();
{
@@ -1224,11 +1241,11 @@ test "Manifest with files added after initial hash work" {
try testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
- try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
+ try tmp.dir.writeFile(temp_file2, "Hello world the second, updated\n");
// Wait for file timestamps to tick
- const initial_time2 = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time2) {
+ const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir);
+ while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time2) {
std.time.sleep(1);
}
@@ -1251,8 +1268,4 @@ test "Manifest with files added after initial hash work" {
try testing.expect(!mem.eql(u8, &digest1, &digest3));
}
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file1);
- try cwd.deleteFile(temp_file2);
}
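
Note: the new Directory.format method added above makes a cache prefix directly printable with "{}". A minimal sketch of what that enables follows; the path literal and the print call are illustrative assumptions, not part of the patch.

    const std = @import("std");

    pub fn main() void {
        // Sketch only: assumes std.Build.Cache.Directory is the struct shown above.
        const dir: std.Build.Cache.Directory = .{
            .path = "zig-cache",
            .handle = std.fs.cwd(), // format() never touches the handle
        };
        // Prints "cache prefix: zig-cache/"; the trailing separator comes from
        // format(), and a null path (meaning cwd) would print nothing at all.
        std.debug.print("cache prefix: {}\n", .{dir});
    }
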
diff --git a/lib/std/Build/CheckFileStep.zig b/lib/std/Build/CheckFileStep.zig
index b08a797e84..1c2b6b7786 100644
--- a/lib/std/Build/CheckFileStep.zig
+++ b/lib/std/Build/CheckFileStep.zig
@@ -1,51 +1,88 @@
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const fs = std.fs;
-const mem = std.mem;
-
-const CheckFileStep = @This();
-
-pub const base_id = .check_file;
+//! Fail the build step if a file does not match certain checks.
+//! TODO: make this more flexible, supporting more kinds of checks.
+//! TODO: generalize the code in std.testing.expectEqualStrings and make this
+//! CheckFileStep produce those helpful diagnostics when there is not a match.
step: Step,
-builder: *std.Build,
expected_matches: []const []const u8,
+expected_exact: ?[]const u8,
source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
+pub const base_id = .check_file;
+
+pub const Options = struct {
+ expected_matches: []const []const u8 = &.{},
+ expected_exact: ?[]const u8 = null,
+};
+
pub fn create(
- builder: *std.Build,
+ owner: *std.Build,
source: std.Build.FileSource,
- expected_matches: []const []const u8,
+ options: Options,
) *CheckFileStep {
- const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
- self.* = CheckFileStep{
- .builder = builder,
- .step = Step.init(.check_file, "CheckFile", builder.allocator, make),
- .source = source.dupe(builder),
- .expected_matches = builder.dupeStrings(expected_matches),
+ const self = owner.allocator.create(CheckFileStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = .check_file,
+ .name = "CheckFile",
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
+ .expected_matches = owner.dupeStrings(options.expected_matches),
+ .expected_exact = options.expected_exact,
};
self.source.addStepDependencies(&self.step);
return self;
}
-fn make(step: *Step) !void {
+pub fn setName(self: *CheckFileStep, name: []const u8) void {
+ self.step.name = name;
+}
+
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const self = @fieldParentPtr(CheckFileStep, "step", step);
- const src_path = self.source.getPath(self.builder);
- const contents = try fs.cwd().readFileAlloc(self.builder.allocator, src_path, self.max_bytes);
+ const src_path = self.source.getPath(b);
+ const contents = fs.cwd().readFileAlloc(b.allocator, src_path, self.max_bytes) catch |err| {
+ return step.fail("unable to read '{s}': {s}", .{
+ src_path, @errorName(err),
+ });
+ };
for (self.expected_matches) |expected_match| {
if (mem.indexOf(u8, contents, expected_match) == null) {
- std.debug.print(
+ return step.fail(
\\
- \\========= Expected to find: ===================
+ \\========= expected to find: ===================
\\{s}
- \\========= But file does not contain it: =======
+ \\========= but file does not contain it: =======
\\{s}
- \\
+ \\===============================================
, .{ expected_match, contents });
- return error.TestFailed;
+ }
+ }
+
+ if (self.expected_exact) |expected_exact| {
+ if (!mem.eql(u8, expected_exact, contents)) {
+ return step.fail(
+ \\
+ \\========= expected: =====================
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following file: ======
+ \\{s}
+ , .{ expected_exact, contents, src_path });
}
}
}
+
+const CheckFileStep = @This();
+const std = @import("../std.zig");
+const Step = std.Build.Step;
+const fs = std.fs;
+const mem = std.mem;
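
Note: a minimal build.zig sketch of driving the reworked CheckFileStep above; b, exe, and the expected string are assumptions for illustration, not part of the patch.

    // Sketch only: assumes an existing b: *std.Build and an exe CompileStep.
    const check_file = std.Build.CheckFileStep.create(b, exe.getOutputSource(), .{
        .expected_matches = &.{"Hello, world"},
        // .expected_exact = "...", // alternatively, require the whole file to match
    });
    check_file.setName("check hello output");
    b.getInstallStep().dependOn(&check_file.step);
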
diff --git a/lib/std/Build/CheckObjectStep.zig b/lib/std/Build/CheckObjectStep.zig
index 5cb096581f..fbeb87baee 100644
--- a/lib/std/Build/CheckObjectStep.zig
+++ b/lib/std/Build/CheckObjectStep.zig
@@ -10,25 +10,31 @@ const CheckObjectStep = @This();
const Allocator = mem.Allocator;
const Step = std.Build.Step;
-const EmulatableRunStep = std.Build.EmulatableRunStep;
pub const base_id = .check_object;
step: Step,
-builder: *std.Build,
source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
checks: std.ArrayList(Check),
dump_symtab: bool = false,
obj_format: std.Target.ObjectFormat,
-pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
- const gpa = builder.allocator;
+pub fn create(
+ owner: *std.Build,
+ source: std.Build.FileSource,
+ obj_format: std.Target.ObjectFormat,
+) *CheckObjectStep {
+ const gpa = owner.allocator;
const self = gpa.create(CheckObjectStep) catch @panic("OOM");
self.* = .{
- .builder = builder,
- .step = Step.init(.check_file, "CheckObject", gpa, make),
- .source = source.dupe(builder),
+ .step = Step.init(.{
+ .id = .check_file,
+ .name = "CheckObject",
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
.checks = std.ArrayList(Check).init(gpa),
.obj_format = obj_format,
};
@@ -38,16 +44,30 @@ pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std
/// Runs and (optionally) compares the output of a binary.
/// Asserts `self` was generated from an executable step.
-pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
+/// TODO this doesn't actually compare, and there's no apparent reason for it
+/// to depend on the check object step. I don't see why this function should exist,
+/// the caller could just add the run step directly.
+pub fn runAndCompare(self: *CheckObjectStep) *std.Build.RunStep {
const dependencies_len = self.step.dependencies.items.len;
assert(dependencies_len > 0);
const exe_step = self.step.dependencies.items[dependencies_len - 1];
const exe = exe_step.cast(std.Build.CompileStep).?;
- const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
- emulatable_step.step.dependOn(&self.step);
- return emulatable_step;
+ const run = self.step.owner.addRunArtifact(exe);
+ run.skip_foreign_checks = true;
+ run.step.dependOn(&self.step);
+ return run;
}
+const SearchPhrase = struct {
+ string: []const u8,
+ file_source: ?std.Build.FileSource = null,
+
+ fn resolve(phrase: SearchPhrase, b: *std.Build, step: *Step) []const u8 {
+ const file_source = phrase.file_source orelse return phrase.string;
+ return b.fmt("{s} {s}", .{ phrase.string, file_source.getPath2(b, step) });
+ }
+};
+
/// There two types of actions currently suported:
/// * `.match` - is the main building block of standard matchers with optional eat-all token `{*}`
/// and extractors by name such as `{n_value}`. Please note this action is very simplistic in nature
@@ -62,7 +82,7 @@ pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
/// they could then be added with this simple program `vmaddr entryoff +`.
const Action = struct {
tag: enum { match, not_present, compute_cmp },
- phrase: []const u8,
+ phrase: SearchPhrase,
expected: ?ComputeCompareExpected = null,
/// Will return true if the `phrase` was found in the `haystack`.
@@ -73,12 +93,18 @@ const Action = struct {
/// and save under `vmaddr` global name (see `global_vars` param)
/// name {*}libobjc{*}.dylib => will match `name` followed by a token which contains `libobjc` and `.dylib`
/// in that order with other letters in between
- fn match(act: Action, haystack: []const u8, global_vars: anytype) !bool {
+ fn match(
+ act: Action,
+ b: *std.Build,
+ step: *Step,
+ haystack: []const u8,
+ global_vars: anytype,
+ ) !bool {
assert(act.tag == .match or act.tag == .not_present);
-
+ const phrase = act.phrase.resolve(b, step);
var candidate_var: ?struct { name: []const u8, value: u64 } = null;
var hay_it = mem.tokenize(u8, mem.trim(u8, haystack, " "), " ");
- var needle_it = mem.tokenize(u8, mem.trim(u8, act.phrase, " "), " ");
+ var needle_it = mem.tokenize(u8, mem.trim(u8, phrase, " "), " ");
while (needle_it.next()) |needle_tok| {
const hay_tok = hay_it.next() orelse return false;
@@ -123,11 +149,13 @@ const Action = struct {
/// Will return true if the `phrase` is correctly parsed into an RPN program and
/// its reduced, computed value compares using `op` with the expected value, either
/// a literal or another extracted variable.
- fn computeCmp(act: Action, gpa: Allocator, global_vars: anytype) !bool {
+ fn computeCmp(act: Action, b: *std.Build, step: *Step, global_vars: anytype) !bool {
+ const gpa = step.owner.allocator;
+ const phrase = act.phrase.resolve(b, step);
var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
var values = std.ArrayList(u64).init(gpa);
- var it = mem.tokenize(u8, act.phrase, " ");
+ var it = mem.tokenize(u8, phrase, " ");
while (it.next()) |next| {
if (mem.eql(u8, next, "+")) {
try op_stack.append(.add);
@@ -140,11 +168,11 @@ const Action = struct {
} else {
const val = std.fmt.parseInt(u64, next, 0) catch blk: {
break :blk global_vars.get(next) orelse {
- std.debug.print(
+ try step.addError(
\\
- \\========= Variable was not extracted: ===========
+ \\========= variable was not extracted: ===========
\\{s}
- \\
+ \\=================================================
, .{next});
return error.UnknownVariable;
};
@@ -176,11 +204,11 @@ const Action = struct {
const exp_value = switch (act.expected.?.value) {
.variable => |name| global_vars.get(name) orelse {
- std.debug.print(
+ try step.addError(
\\
- \\========= Variable was not extracted: ===========
+ \\========= variable was not extracted: ===========
\\{s}
- \\
+ \\=================================================
, .{name});
return error.UnknownVariable;
},
@@ -214,34 +242,32 @@ const ComputeCompareExpected = struct {
};
const Check = struct {
- builder: *std.Build,
actions: std.ArrayList(Action),
- fn create(b: *std.Build) Check {
+ fn create(allocator: Allocator) Check {
return .{
- .builder = b,
- .actions = std.ArrayList(Action).init(b.allocator),
+ .actions = std.ArrayList(Action).init(allocator),
};
}
- fn match(self: *Check, phrase: []const u8) void {
+ fn match(self: *Check, phrase: SearchPhrase) void {
self.actions.append(.{
.tag = .match,
- .phrase = self.builder.dupe(phrase),
+ .phrase = phrase,
}) catch @panic("OOM");
}
- fn notPresent(self: *Check, phrase: []const u8) void {
+ fn notPresent(self: *Check, phrase: SearchPhrase) void {
self.actions.append(.{
.tag = .not_present,
- .phrase = self.builder.dupe(phrase),
+ .phrase = phrase,
}) catch @panic("OOM");
}
- fn computeCmp(self: *Check, phrase: []const u8, expected: ComputeCompareExpected) void {
+ fn computeCmp(self: *Check, phrase: SearchPhrase, expected: ComputeCompareExpected) void {
self.actions.append(.{
.tag = .compute_cmp,
- .phrase = self.builder.dupe(phrase),
+ .phrase = phrase,
.expected = expected,
}) catch @panic("OOM");
}
@@ -249,8 +275,8 @@ const Check = struct {
/// Creates a new sequence of actions with `phrase` as the first anchor searched phrase.
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
- var new_check = Check.create(self.builder);
- new_check.match(phrase);
+ var new_check = Check.create(self.step.owner.allocator);
+ new_check.match(.{ .string = self.step.owner.dupe(phrase) });
self.checks.append(new_check) catch @panic("OOM");
}
@@ -259,7 +285,19 @@ pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
pub fn checkNext(self: *CheckObjectStep, phrase: []const u8) void {
assert(self.checks.items.len > 0);
const last = &self.checks.items[self.checks.items.len - 1];
- last.match(phrase);
+ last.match(.{ .string = self.step.owner.dupe(phrase) });
+}
+
+/// Like `checkNext()` but takes an additional argument `FileSource` which will be
+/// resolved to a full search query in `make()`.
+pub fn checkNextFileSource(
+ self: *CheckObjectStep,
+ phrase: []const u8,
+ file_source: std.Build.FileSource,
+) void {
+ assert(self.checks.items.len > 0);
+ const last = &self.checks.items[self.checks.items.len - 1];
+ last.match(.{ .string = self.step.owner.dupe(phrase), .file_source = file_source });
}
/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`
@@ -268,7 +306,7 @@ pub fn checkNext(self: *CheckObjectStep, phrase: []const u8) void {
pub fn checkNotPresent(self: *CheckObjectStep, phrase: []const u8) void {
assert(self.checks.items.len > 0);
const last = &self.checks.items[self.checks.items.len - 1];
- last.notPresent(phrase);
+ last.notPresent(.{ .string = self.step.owner.dupe(phrase) });
}
/// Creates a new check checking specifically symbol table parsed and dumped from the object
@@ -291,34 +329,34 @@ pub fn checkComputeCompare(
program: []const u8,
expected: ComputeCompareExpected,
) void {
- var new_check = Check.create(self.builder);
- new_check.computeCmp(program, expected);
+ var new_check = Check.create(self.step.owner.allocator);
+ new_check.computeCmp(.{ .string = self.step.owner.dupe(program) }, expected);
self.checks.append(new_check) catch @panic("OOM");
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
+ const gpa = b.allocator;
const self = @fieldParentPtr(CheckObjectStep, "step", step);
- const gpa = self.builder.allocator;
- const src_path = self.source.getPath(self.builder);
- const contents = try fs.cwd().readFileAllocOptions(
+ const src_path = self.source.getPath(b);
+ const contents = fs.cwd().readFileAllocOptions(
gpa,
src_path,
self.max_bytes,
null,
@alignOf(u64),
null,
- );
+ ) catch |err| return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err) });
const output = switch (self.obj_format) {
- .macho => try MachODumper.parseAndDump(contents, .{
- .gpa = gpa,
+ .macho => try MachODumper.parseAndDump(step, contents, .{
.dump_symtab = self.dump_symtab,
}),
.elf => @panic("TODO elf parser"),
.coff => @panic("TODO coff parser"),
- .wasm => try WasmDumper.parseAndDump(contents, .{
- .gpa = gpa,
+ .wasm => try WasmDumper.parseAndDump(step, contents, .{
.dump_symtab = self.dump_symtab,
}),
else => unreachable,
@@ -332,56 +370,52 @@ fn make(step: *Step) !void {
switch (act.tag) {
.match => {
while (it.next()) |line| {
- if (try act.match(line, &vars)) break;
+ if (try act.match(b, step, line, &vars)) break;
} else {
- std.debug.print(
+ return step.fail(
\\
- \\========= Expected to find: ==========================
+ \\========= expected to find: ==========================
\\{s}
- \\========= But parsed file does not contain it: =======
+ \\========= but parsed file does not contain it: =======
\\{s}
- \\
- , .{ act.phrase, output });
- return error.TestFailed;
+ \\======================================================
+ , .{ act.phrase.resolve(b, step), output });
}
},
.not_present => {
while (it.next()) |line| {
- if (try act.match(line, &vars)) {
- std.debug.print(
+ if (try act.match(b, step, line, &vars)) {
+ return step.fail(
\\
- \\========= Expected not to find: ===================
+ \\========= expected not to find: ===================
\\{s}
- \\========= But parsed file does contain it: ========
+ \\========= but parsed file does contain it: ========
\\{s}
- \\
- , .{ act.phrase, output });
- return error.TestFailed;
+ \\===================================================
+ , .{ act.phrase.resolve(b, step), output });
}
}
},
.compute_cmp => {
- const res = act.computeCmp(gpa, vars) catch |err| switch (err) {
+ const res = act.computeCmp(b, step, vars) catch |err| switch (err) {
error.UnknownVariable => {
- std.debug.print(
- \\========= From parsed file: =====================
+ return step.fail(
+ \\========= from parsed file: =====================
\\{s}
- \\
+ \\=================================================
, .{output});
- return error.TestFailed;
},
else => |e| return e,
};
if (!res) {
- std.debug.print(
+ return step.fail(
\\
- \\========= Comparison failed for action: ===========
+ \\========= comparison failed for action: ===========
\\{s} {}
- \\========= From parsed file: =======================
+ \\========= from parsed file: =======================
\\{s}
- \\
- , .{ act.phrase, act.expected.?, output });
- return error.TestFailed;
+ \\===================================================
+ , .{ act.phrase.resolve(b, step), act.expected.?, output });
}
},
}
@@ -390,7 +424,6 @@ fn make(step: *Step) !void {
}
const Opts = struct {
- gpa: ?Allocator = null,
dump_symtab: bool = false,
};
@@ -398,8 +431,8 @@ const MachODumper = struct {
const LoadCommandIterator = macho.LoadCommandIterator;
const symtab_label = "symtab";
- fn parseAndDump(bytes: []align(@alignOf(u64)) const u8, opts: Opts) ![]const u8 {
- const gpa = opts.gpa orelse unreachable; // MachO dumper requires an allocator
+ fn parseAndDump(step: *Step, bytes: []align(@alignOf(u64)) const u8, opts: Opts) ![]const u8 {
+ const gpa = step.owner.allocator;
var stream = std.io.fixedBufferStream(bytes);
const reader = stream.reader();
@@ -681,8 +714,8 @@ const MachODumper = struct {
const WasmDumper = struct {
const symtab_label = "symbols";
- fn parseAndDump(bytes: []const u8, opts: Opts) ![]const u8 {
- const gpa = opts.gpa orelse unreachable; // Wasm dumper requires an allocator
+ fn parseAndDump(step: *Step, bytes: []const u8, opts: Opts) ![]const u8 {
+ const gpa = step.owner.allocator;
if (opts.dump_symtab) {
@panic("TODO: Implement symbol table parsing and dumping");
}
@@ -703,20 +736,24 @@ const WasmDumper = struct {
const writer = output.writer();
while (reader.readByte()) |current_byte| {
- const section = std.meta.intToEnum(std.wasm.Section, current_byte) catch |err| {
- std.debug.print("Found invalid section id '{d}'\n", .{current_byte});
- return err;
+ const section = std.meta.intToEnum(std.wasm.Section, current_byte) catch {
+ return step.fail("Found invalid section id '{d}'", .{current_byte});
};
const section_length = try std.leb.readULEB128(u32, reader);
- try parseAndDumpSection(section, bytes[fbs.pos..][0..section_length], writer);
+ try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], writer);
fbs.pos += section_length;
} else |_| {} // reached end of stream
return output.toOwnedSlice();
}
- fn parseAndDumpSection(section: std.wasm.Section, data: []const u8, writer: anytype) !void {
+ fn parseAndDumpSection(
+ step: *Step,
+ section: std.wasm.Section,
+ data: []const u8,
+ writer: anytype,
+ ) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
@@ -739,7 +776,7 @@ const WasmDumper = struct {
=> {
const entries = try std.leb.readULEB128(u32, reader);
try writer.print("\nentries {d}\n", .{entries});
- try dumpSection(section, data[fbs.pos..], entries, writer);
+ try dumpSection(step, section, data[fbs.pos..], entries, writer);
},
.custom => {
const name_length = try std.leb.readULEB128(u32, reader);
@@ -748,7 +785,7 @@ const WasmDumper = struct {
try writer.print("\nname {s}\n", .{name});
if (mem.eql(u8, name, "name")) {
- try parseDumpNames(reader, writer, data);
+ try parseDumpNames(step, reader, writer, data);
} else if (mem.eql(u8, name, "producers")) {
try parseDumpProducers(reader, writer, data);
} else if (mem.eql(u8, name, "target_features")) {
@@ -764,7 +801,7 @@ const WasmDumper = struct {
}
}
- fn dumpSection(section: std.wasm.Section, data: []const u8, entries: u32, writer: anytype) !void {
+ fn dumpSection(step: *Step, section: std.wasm.Section, data: []const u8, entries: u32, writer: anytype) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
@@ -774,19 +811,18 @@ const WasmDumper = struct {
while (i < entries) : (i += 1) {
const func_type = try reader.readByte();
if (func_type != std.wasm.function_type) {
- std.debug.print("Expected function type, found byte '{d}'\n", .{func_type});
- return error.UnexpectedByte;
+ return step.fail("expected function type, found byte '{d}'", .{func_type});
}
const params = try std.leb.readULEB128(u32, reader);
try writer.print("params {d}\n", .{params});
var index: u32 = 0;
while (index < params) : (index += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
} else index = 0;
const returns = try std.leb.readULEB128(u32, reader);
try writer.print("returns {d}\n", .{returns});
while (index < returns) : (index += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
}
}
},
@@ -800,9 +836,8 @@ const WasmDumper = struct {
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
- const kind = std.meta.intToEnum(std.wasm.ExternalKind, try reader.readByte()) catch |err| {
- std.debug.print("Invalid import kind\n", .{});
- return err;
+ const kind = std.meta.intToEnum(std.wasm.ExternalKind, try reader.readByte()) catch {
+ return step.fail("invalid import kind", .{});
};
try writer.print(
@@ -819,11 +854,11 @@ const WasmDumper = struct {
try parseDumpLimits(reader, writer);
},
.global => {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u32, reader)});
},
.table => {
- try parseDumpType(std.wasm.RefType, reader, writer);
+ try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
},
}
@@ -838,7 +873,7 @@ const WasmDumper = struct {
.table => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
- try parseDumpType(std.wasm.RefType, reader, writer);
+ try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
}
},
@@ -851,9 +886,9 @@ const WasmDumper = struct {
.global => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
- try parseDumpType(std.wasm.Valtype, reader, writer);
+ try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readULEB128(u1, reader)});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
}
},
.@"export" => {
@@ -863,9 +898,8 @@ const WasmDumper = struct {
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
const kind_byte = try std.leb.readULEB128(u8, reader);
- const kind = std.meta.intToEnum(std.wasm.ExternalKind, kind_byte) catch |err| {
- std.debug.print("invalid export kind value '{d}'\n", .{kind_byte});
- return err;
+ const kind = std.meta.intToEnum(std.wasm.ExternalKind, kind_byte) catch {
+ return step.fail("invalid export kind value '{d}'", .{kind_byte});
};
const index = try std.leb.readULEB128(u32, reader);
try writer.print(
@@ -880,7 +914,7 @@ const WasmDumper = struct {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try writer.print("table index {d}\n", .{try std.leb.readULEB128(u32, reader)});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
const function_indexes = try std.leb.readULEB128(u32, reader);
var function_index: u32 = 0;
@@ -896,7 +930,7 @@ const WasmDumper = struct {
while (i < entries) : (i += 1) {
const index = try std.leb.readULEB128(u32, reader);
try writer.print("memory index 0x{x}\n", .{index});
- try parseDumpInit(reader, writer);
+ try parseDumpInit(step, reader, writer);
const size = try std.leb.readULEB128(u32, reader);
try writer.print("size {d}\n", .{size});
try reader.skipBytes(size, .{}); // we do not care about the content of the segments
@@ -906,11 +940,10 @@ const WasmDumper = struct {
}
}
- fn parseDumpType(comptime WasmType: type, reader: anytype, writer: anytype) !void {
+ fn parseDumpType(step: *Step, comptime WasmType: type, reader: anytype, writer: anytype) !void {
const type_byte = try reader.readByte();
- const valtype = std.meta.intToEnum(WasmType, type_byte) catch |err| {
- std.debug.print("Invalid wasm type value '{d}'\n", .{type_byte});
- return err;
+ const valtype = std.meta.intToEnum(WasmType, type_byte) catch {
+ return step.fail("Invalid wasm type value '{d}'", .{type_byte});
};
try writer.print("type {s}\n", .{@tagName(valtype)});
}
@@ -925,11 +958,10 @@ const WasmDumper = struct {
}
}
- fn parseDumpInit(reader: anytype, writer: anytype) !void {
+ fn parseDumpInit(step: *Step, reader: anytype, writer: anytype) !void {
const byte = try std.leb.readULEB128(u8, reader);
- const opcode = std.meta.intToEnum(std.wasm.Opcode, byte) catch |err| {
- std.debug.print("invalid wasm opcode '{d}'\n", .{byte});
- return err;
+ const opcode = std.meta.intToEnum(std.wasm.Opcode, byte) catch {
+ return step.fail("invalid wasm opcode '{d}'", .{byte});
};
switch (opcode) {
.i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}),
@@ -941,14 +973,13 @@ const WasmDumper = struct {
}
const end_opcode = try std.leb.readULEB128(u8, reader);
if (end_opcode != std.wasm.opcode(.end)) {
- std.debug.print("expected 'end' opcode in init expression\n", .{});
- return error.MissingEndOpcode;
+ return step.fail("expected 'end' opcode in init expression", .{});
}
}
- fn parseDumpNames(reader: anytype, writer: anytype, data: []const u8) !void {
+ fn parseDumpNames(step: *Step, reader: anytype, writer: anytype, data: []const u8) !void {
while (reader.context.pos < data.len) {
- try parseDumpType(std.wasm.NameSubsection, reader, writer);
+ try parseDumpType(step, std.wasm.NameSubsection, reader, writer);
const size = try std.leb.readULEB128(u32, reader);
const entries = try std.leb.readULEB128(u32, reader);
try writer.print(
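
Note: a sketch of how the phrase-based checks above are typically wired up; b, lib, test_step, and the search phrases are illustrative assumptions, not part of the patch.

    // Sketch only: create() and the check* helpers are the entry points shown above.
    const check = std.Build.CheckObjectStep.create(b, lib.getOutputSource(), .macho);
    check.checkStart("cmd ID_DYLIB");
    check.checkNext("name {*}libfoo{*}.dylib");
    check.checkNotPresent("cmd CODE_SIGNATURE");
    // checkNextFileSource() can splice another step's output path into a phrase at make() time.
    test_step.dependOn(&check.step);
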
diff --git a/lib/std/Build/CompileStep.zig b/lib/std/Build/CompileStep.zig
index ea2320cc89..b438331991 100644
--- a/lib/std/Build/CompileStep.zig
+++ b/lib/std/Build/CompileStep.zig
@@ -1,7 +1,6 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const mem = std.mem;
-const log = std.log;
const fs = std.fs;
const assert = std.debug.assert;
const panic = std.debug.panic;
@@ -22,7 +21,6 @@ const InstallDir = std.Build.InstallDir;
const InstallArtifactStep = std.Build.InstallArtifactStep;
const GeneratedFile = std.Build.GeneratedFile;
const ObjCopyStep = std.Build.ObjCopyStep;
-const EmulatableRunStep = std.Build.EmulatableRunStep;
const CheckObjectStep = std.Build.CheckObjectStep;
const RunStep = std.Build.RunStep;
const OptionsStep = std.Build.OptionsStep;
@@ -32,7 +30,6 @@ const CompileStep = @This();
pub const base_id: Step.Id = .compile;
step: Step,
-builder: *std.Build,
name: []const u8,
target: CrossTarget,
target_info: NativeTargetInfo,
@@ -49,9 +46,9 @@ strip: ?bool,
unwind_tables: ?bool,
// keep in sync with src/link.zig:CompressDebugSections
compress_debug_sections: enum { none, zlib } = .none,
-lib_paths: ArrayList([]const u8),
-rpaths: ArrayList([]const u8),
-framework_dirs: ArrayList([]const u8),
+lib_paths: ArrayList(FileSource),
+rpaths: ArrayList(FileSource),
+framework_dirs: ArrayList(FileSource),
frameworks: StringHashMap(FrameworkLinkInfo),
verbose_link: bool,
verbose_cc: bool,
@@ -86,7 +83,6 @@ c_std: std.Build.CStd,
zig_lib_dir: ?[]const u8,
main_pkg_path: ?[]const u8,
exec_cmd_args: ?[]const ?[]const u8,
-name_prefix: []const u8,
filter: ?[]const u8,
test_evented_io: bool = false,
test_runner: ?[]const u8,
@@ -144,6 +140,9 @@ link_function_sections: bool = false,
/// exported symbols.
link_gc_sections: ?bool = null,
+/// (Windows) Whether or not to enable ASLR. Maps to the /DYNAMICBASE[:NO] linker argument.
+linker_dynamicbase: bool = true,
+
linker_allow_shlib_undefined: ?bool = null,
/// Permit read-only relocations in read-only segments. Disallowed by default.
@@ -210,10 +209,17 @@ want_lto: ?bool = null,
use_llvm: ?bool = null,
use_lld: ?bool = null,
+/// This is an advanced setting that can change the intent of this CompileStep.
+/// If this slice has nonzero length, it means that this CompileStep exists to
+/// check for compile errors and return *success* if they match, and failure
+/// otherwise.
+expect_errors: []const []const u8 = &.{},
+
output_path_source: GeneratedFile,
output_lib_path_source: GeneratedFile,
output_h_path_source: GeneratedFile,
output_pdb_path_source: GeneratedFile,
+output_dirname_source: GeneratedFile,
pub const CSourceFiles = struct {
files: []const []const u8,
@@ -277,6 +283,7 @@ pub const Options = struct {
kind: Kind,
linkage: ?Linkage = null,
version: ?std.builtin.Version = null,
+ max_rss: usize = 0,
};
pub const Kind = enum {
@@ -284,7 +291,6 @@ pub const Kind = enum {
lib,
obj,
@"test",
- test_exe,
};
pub const Linkage = enum { dynamic, static };
@@ -305,18 +311,35 @@ pub const EmitOption = union(enum) {
}
};
-pub fn create(builder: *std.Build, options: Options) *CompileStep {
- const name = builder.dupe(options.name);
- const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(builder) else null;
+pub fn create(owner: *std.Build, options: Options) *CompileStep {
+ const name = owner.dupe(options.name);
+ const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(owner) else null;
if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) {
panic("invalid name: '{s}'. It looks like a file path, but it is supposed to be the library or application name.", .{name});
}
- const self = builder.allocator.create(CompileStep) catch @panic("OOM");
+ // Avoid the common case of the step name looking like "zig test test".
+ const name_adjusted = if (options.kind == .@"test" and mem.eql(u8, name, "test"))
+ ""
+ else
+ owner.fmt("{s} ", .{name});
+
+ const step_name = owner.fmt("{s} {s}{s} {s}", .{
+ switch (options.kind) {
+ .exe => "zig build-exe",
+ .lib => "zig build-lib",
+ .obj => "zig build-obj",
+ .@"test" => "zig test",
+ },
+ name_adjusted,
+ @tagName(options.optimize),
+ options.target.zigTriple(owner.allocator) catch @panic("OOM"),
+ });
+
+ const self = owner.allocator.create(CompileStep) catch @panic("OOM");
self.* = CompileStep{
.strip = null,
.unwind_tables = null,
- .builder = builder,
.verbose_link = false,
.verbose_cc = false,
.optimize = options.optimize,
@@ -325,29 +348,34 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
.kind = options.kind,
.root_src = root_src,
.name = name,
- .frameworks = StringHashMap(FrameworkLinkInfo).init(builder.allocator),
- .step = Step.init(base_id, name, builder.allocator, make),
+ .frameworks = StringHashMap(FrameworkLinkInfo).init(owner.allocator),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = step_name,
+ .owner = owner,
+ .makeFn = make,
+ .max_rss = options.max_rss,
+ }),
.version = options.version,
.out_filename = undefined,
- .out_h_filename = builder.fmt("{s}.h", .{name}),
+ .out_h_filename = owner.fmt("{s}.h", .{name}),
.out_lib_filename = undefined,
- .out_pdb_filename = builder.fmt("{s}.pdb", .{name}),
+ .out_pdb_filename = owner.fmt("{s}.pdb", .{name}),
.major_only_filename = null,
.name_only_filename = null,
- .modules = std.StringArrayHashMap(*Module).init(builder.allocator),
- .include_dirs = ArrayList(IncludeDir).init(builder.allocator),
- .link_objects = ArrayList(LinkObject).init(builder.allocator),
- .c_macros = ArrayList([]const u8).init(builder.allocator),
- .lib_paths = ArrayList([]const u8).init(builder.allocator),
- .rpaths = ArrayList([]const u8).init(builder.allocator),
- .framework_dirs = ArrayList([]const u8).init(builder.allocator),
- .installed_headers = ArrayList(*Step).init(builder.allocator),
+ .modules = std.StringArrayHashMap(*Module).init(owner.allocator),
+ .include_dirs = ArrayList(IncludeDir).init(owner.allocator),
+ .link_objects = ArrayList(LinkObject).init(owner.allocator),
+ .c_macros = ArrayList([]const u8).init(owner.allocator),
+ .lib_paths = ArrayList(FileSource).init(owner.allocator),
+ .rpaths = ArrayList(FileSource).init(owner.allocator),
+ .framework_dirs = ArrayList(FileSource).init(owner.allocator),
+ .installed_headers = ArrayList(*Step).init(owner.allocator),
.object_src = undefined,
.c_std = std.Build.CStd.C99,
.zig_lib_dir = null,
.main_pkg_path = null,
.exec_cmd_args = null,
- .name_prefix = "",
.filter = null,
.test_runner = null,
.disable_stack_probing = false,
@@ -363,6 +391,7 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
.output_lib_path_source = GeneratedFile{ .step = &self.step },
.output_h_path_source = GeneratedFile{ .step = &self.step },
.output_pdb_path_source = GeneratedFile{ .step = &self.step },
+ .output_dirname_source = GeneratedFile{ .step = &self.step },
.target_info = NativeTargetInfo.detect(self.target) catch @panic("unhandled error"),
};
@@ -372,15 +401,16 @@ pub fn create(builder: *std.Build, options: Options) *CompileStep {
}
fn computeOutFileNames(self: *CompileStep) void {
+ const b = self.step.owner;
const target = self.target_info.target;
- self.out_filename = std.zig.binNameAlloc(self.builder.allocator, .{
+ self.out_filename = std.zig.binNameAlloc(b.allocator, .{
.root_name = self.name,
.target = target,
.output_mode = switch (self.kind) {
.lib => .Lib,
.obj => .Obj,
- .exe, .@"test", .test_exe => .Exe,
+ .exe, .@"test" => .Exe,
},
.link_mode = if (self.linkage) |some| @as(std.builtin.LinkMode, switch (some) {
.dynamic => .Dynamic,
@@ -394,30 +424,30 @@ fn computeOutFileNames(self: *CompileStep) void {
self.out_lib_filename = self.out_filename;
} else if (self.version) |version| {
if (target.isDarwin()) {
- self.major_only_filename = self.builder.fmt("lib{s}.{d}.dylib", .{
+ self.major_only_filename = b.fmt("lib{s}.{d}.dylib", .{
self.name,
version.major,
});
- self.name_only_filename = self.builder.fmt("lib{s}.dylib", .{self.name});
+ self.name_only_filename = b.fmt("lib{s}.dylib", .{self.name});
self.out_lib_filename = self.out_filename;
} else if (target.os.tag == .windows) {
- self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name});
+ self.out_lib_filename = b.fmt("{s}.lib", .{self.name});
} else {
- self.major_only_filename = self.builder.fmt("lib{s}.so.{d}", .{ self.name, version.major });
- self.name_only_filename = self.builder.fmt("lib{s}.so", .{self.name});
+ self.major_only_filename = b.fmt("lib{s}.so.{d}", .{ self.name, version.major });
+ self.name_only_filename = b.fmt("lib{s}.so", .{self.name});
self.out_lib_filename = self.out_filename;
}
} else {
if (target.isDarwin()) {
self.out_lib_filename = self.out_filename;
} else if (target.os.tag == .windows) {
- self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name});
+ self.out_lib_filename = b.fmt("{s}.lib", .{self.name});
} else {
self.out_lib_filename = self.out_filename;
}
}
if (self.output_dir != null) {
- self.output_lib_path_source.path = self.builder.pathJoin(
+ self.output_lib_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_lib_filename },
);
}
@@ -425,17 +455,20 @@ fn computeOutFileNames(self: *CompileStep) void {
}
pub fn setOutputDir(self: *CompileStep, dir: []const u8) void {
- self.output_dir = self.builder.dupePath(dir);
+ const b = self.step.owner;
+ self.output_dir = b.dupePath(dir);
}
pub fn install(self: *CompileStep) void {
- self.builder.installArtifact(self);
+ const b = self.step.owner;
+ b.installArtifact(self);
}
-pub fn installHeader(a: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
- const install_file = a.builder.addInstallHeaderFile(src_path, dest_rel_path);
- a.builder.getInstallStep().dependOn(&install_file.step);
- a.installed_headers.append(&install_file.step) catch @panic("OOM");
+pub fn installHeader(cs: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
+ const b = cs.step.owner;
+ const install_file = b.addInstallHeaderFile(src_path, dest_rel_path);
+ b.getInstallStep().dependOn(&install_file.step);
+ cs.installed_headers.append(&install_file.step) catch @panic("OOM");
}
pub const InstallConfigHeaderOptions = struct {
@@ -449,13 +482,14 @@ pub fn installConfigHeader(
options: InstallConfigHeaderOptions,
) void {
const dest_rel_path = options.dest_rel_path orelse config_header.include_path;
- const install_file = cs.builder.addInstallFileWithDir(
+ const b = cs.step.owner;
+ const install_file = b.addInstallFileWithDir(
.{ .generated = &config_header.output_file },
options.install_dir,
dest_rel_path,
);
install_file.step.dependOn(&config_header.step);
- cs.builder.getInstallStep().dependOn(&install_file.step);
+ b.getInstallStep().dependOn(&install_file.step);
cs.installed_headers.append(&install_file.step) catch @panic("OOM");
}
@@ -472,91 +506,83 @@ pub fn installHeadersDirectory(
}
pub fn installHeadersDirectoryOptions(
- a: *CompileStep,
+ cs: *CompileStep,
options: std.Build.InstallDirStep.Options,
) void {
- const install_dir = a.builder.addInstallDirectory(options);
- a.builder.getInstallStep().dependOn(&install_dir.step);
- a.installed_headers.append(&install_dir.step) catch @panic("OOM");
+ const b = cs.step.owner;
+ const install_dir = b.addInstallDirectory(options);
+ b.getInstallStep().dependOn(&install_dir.step);
+ cs.installed_headers.append(&install_dir.step) catch @panic("OOM");
}
-pub fn installLibraryHeaders(a: *CompileStep, l: *CompileStep) void {
+pub fn installLibraryHeaders(cs: *CompileStep, l: *CompileStep) void {
assert(l.kind == .lib);
- const install_step = a.builder.getInstallStep();
+ const b = cs.step.owner;
+ const install_step = b.getInstallStep();
// Copy each element from installed_headers, modifying the builder
// to be the new parent's builder.
for (l.installed_headers.items) |step| {
const step_copy = switch (step.id) {
inline .install_file, .install_dir => |id| blk: {
const T = id.Type();
- const ptr = a.builder.allocator.create(T) catch @panic("OOM");
+ const ptr = b.allocator.create(T) catch @panic("OOM");
ptr.* = step.cast(T).?.*;
- ptr.override_source_builder = ptr.builder;
- ptr.builder = a.builder;
+ ptr.dest_builder = b;
break :blk &ptr.step;
},
else => unreachable,
};
- a.installed_headers.append(step_copy) catch @panic("OOM");
+ cs.installed_headers.append(step_copy) catch @panic("OOM");
install_step.dependOn(step_copy);
}
- a.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
+ cs.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
}
pub fn addObjCopy(cs: *CompileStep, options: ObjCopyStep.Options) *ObjCopyStep {
+ const b = cs.step.owner;
var copy = options;
if (copy.basename == null) {
if (options.format) |f| {
- copy.basename = cs.builder.fmt("{s}.{s}", .{ cs.name, @tagName(f) });
+ copy.basename = b.fmt("{s}.{s}", .{ cs.name, @tagName(f) });
} else {
copy.basename = cs.name;
}
}
- return cs.builder.addObjCopy(cs.getOutputSource(), copy);
+ return b.addObjCopy(cs.getOutputSource(), copy);
}
/// Deprecated: use `std.Build.addRunArtifact`
/// This function will run in the context of the package that created the executable,
/// which is undesirable when running an executable provided by a dependency package.
-pub fn run(exe: *CompileStep) *RunStep {
- return exe.builder.addRunArtifact(exe);
-}
-
-/// Creates an `EmulatableRunStep` with an executable built with `addExecutable`.
-/// Allows running foreign binaries through emulation platforms such as Qemu or Rosetta.
-/// When a binary cannot be ran through emulation or the option is disabled, a warning
-/// will be printed and the binary will *NOT* be ran.
-pub fn runEmulatable(exe: *CompileStep) *EmulatableRunStep {
- assert(exe.kind == .exe or exe.kind == .test_exe);
-
- const run_step = EmulatableRunStep.create(exe.builder, exe.builder.fmt("run {s}", .{exe.step.name}), exe);
- if (exe.vcpkg_bin_path) |path| {
- RunStep.addPathDirInternal(&run_step.step, exe.builder, path);
- }
- return run_step;
+pub fn run(cs: *CompileStep) *RunStep {
+ return cs.step.owner.addRunArtifact(cs);
}
-pub fn checkObject(self: *CompileStep, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
- return CheckObjectStep.create(self.builder, self.getOutputSource(), obj_format);
+pub fn checkObject(self: *CompileStep) *CheckObjectStep {
+ return CheckObjectStep.create(self.step.owner, self.getOutputSource(), self.target_info.target.ofmt);
}
pub fn setLinkerScriptPath(self: *CompileStep, source: FileSource) void {
- self.linker_script = source.dupe(self.builder);
+ const b = self.step.owner;
+ self.linker_script = source.dupe(b);
source.addStepDependencies(&self.step);
}
pub fn linkFramework(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{}) catch @panic("OOM");
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{}) catch @panic("OOM");
}
pub fn linkFrameworkNeeded(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{
.needed = true,
}) catch @panic("OOM");
}
pub fn linkFrameworkWeak(self: *CompileStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{
+ const b = self.step.owner;
+ self.frameworks.put(b.dupe(framework_name), .{
.weak = true,
}) catch @panic("OOM");
}
@@ -595,7 +621,7 @@ pub fn producesPdbFile(self: *CompileStep) bool {
if (!self.target.isWindows() and !self.target.isUefi()) return false;
if (self.target.getObjectFormat() == .c) return false;
if (self.strip == true) return false;
- return self.isDynamicLibrary() or self.kind == .exe or self.kind == .test_exe;
+ return self.isDynamicLibrary() or self.kind == .exe or self.kind == .@"test";
}
pub fn linkLibC(self: *CompileStep) void {
@@ -609,21 +635,24 @@ pub fn linkLibCpp(self: *CompileStep) void {
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *CompileStep, name: []const u8, value: ?[]const u8) void {
- const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ const b = self.step.owner;
+ const macro = std.Build.constructCMacro(b.allocator, name, value);
self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *CompileStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.c_macros.append(b.dupe(name_and_value)) catch @panic("OOM");
}
/// This one has no integration with anything, it just puts -lname on the command line.
/// Prefer to use `linkSystemLibrary` instead.
pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = false,
.weak = false,
.use_pkg_config = .no,
@@ -634,9 +663,10 @@ pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void {
/// This one has no integration with anything, it just puts -needed-lname on the command line.
/// Prefer to use `linkSystemLibraryNeeded` instead.
pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = true,
.weak = false,
.use_pkg_config = .no,
@@ -647,9 +677,10 @@ pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void {
/// Darwin-only. This one has no integration with anything, it just puts -weak-lname on the
/// command line. Prefer to use `linkSystemLibraryWeak` instead.
pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = false,
.weak = true,
.use_pkg_config = .no,
@@ -660,9 +691,10 @@ pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void {
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibrary` instead.
pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(lib_name),
+ .name = b.dupe(lib_name),
.needed = false,
.weak = false,
.use_pkg_config = .force,
@@ -673,9 +705,10 @@ pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8)
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibraryNeeded` instead.
pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(lib_name),
+ .name = b.dupe(lib_name),
.needed = true,
.weak = false,
.use_pkg_config = .force,
@@ -685,14 +718,15 @@ pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []cons
/// Run pkg-config for the given library name and parse the output, returning the arguments
/// that should be passed to zig to link the given library.
-pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 {
+fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 {
+ const b = self.step.owner;
const pkg_name = match: {
        // First we have to map the library name to the pkg-config name. Unfortunately,
// there are several examples where this is not straightforward:
// -lSDL2 -> pkg-config sdl2
// -lgdk-3 -> pkg-config gdk-3.0
// -latk-1.0 -> pkg-config atk
- const pkgs = try getPkgConfigList(self.builder);
+ const pkgs = try getPkgConfigList(b);
// Exact match means instant winner.
for (pkgs) |pkg| {
@@ -732,7 +766,7 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
};
var code: u8 = undefined;
- const stdout = if (self.builder.execAllowFail(&[_][]const u8{
+ const stdout = if (b.execAllowFail(&[_][]const u8{
"pkg-config",
pkg_name,
"--cflags",
@@ -742,11 +776,10 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
error.ExecNotSupported => return error.PkgConfigFailed,
error.ExitCodeFailure => return error.PkgConfigFailed,
error.FileNotFound => return error.PkgConfigNotInstalled,
- error.ChildExecFailed => return error.PkgConfigFailed,
else => return err,
};
- var zig_args = ArrayList([]const u8).init(self.builder.allocator);
+ var zig_args = ArrayList([]const u8).init(b.allocator);
defer zig_args.deinit();
var it = mem.tokenize(u8, stdout, " \r\n\t");
@@ -771,8 +804,8 @@ pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u
try zig_args.appendSlice(&[_][]const u8{ "-D", macro });
} else if (mem.startsWith(u8, tok, "-D")) {
try zig_args.append(tok);
- } else if (self.builder.verbose) {
- log.warn("Ignoring pkg-config flag '{s}'", .{tok});
+ } else if (b.debug_pkg_config) {
+ return self.step.fail("unknown pkg-config flag '{s}'", .{tok});
}
}
@@ -795,6 +828,7 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
needed: bool = false,
weak: bool = false,
}) void {
+ const b = self.step.owner;
if (isLibCLibrary(name)) {
self.linkLibC();
return;
@@ -806,7 +840,7 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
self.link_objects.append(.{
.system_lib = .{
- .name = self.builder.dupe(name),
+ .name = b.dupe(name),
.needed = opts.needed,
.weak = opts.weak,
.use_pkg_config = .yes,
@@ -814,27 +848,31 @@ fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
}) catch @panic("OOM");
}
-pub fn setNamePrefix(self: *CompileStep, text: []const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.name_prefix = self.builder.dupe(text);
+pub fn setName(self: *CompileStep, text: []const u8) void {
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.name = b.dupe(text);
}
pub fn setFilter(self: *CompileStep, text: ?[]const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.filter = if (text) |t| self.builder.dupe(t) else null;
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.filter = if (text) |t| b.dupe(t) else null;
}
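A usage sketch for the test-artifact setters above. The options-struct form of `b.addTest` and the file/filter strings are assumptions for illustration; both setters assert the artifact kind is `.@"test"`.

    const main_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/main.zig" },
    });
    // Only run tests whose fully-qualified names end with the given text;
    // pass null to clear a previously set filter.
    main_tests.setFilter("parser");
    // Rename the test artifact (replaces the removed setNamePrefix API).
    main_tests.setName("parser-tests");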
pub fn setTestRunner(self: *CompileStep, path: ?[]const u8) void {
- assert(self.kind == .@"test" or self.kind == .test_exe);
- self.test_runner = if (path) |p| self.builder.dupePath(p) else null;
+ const b = self.step.owner;
+ assert(self.kind == .@"test");
+ self.test_runner = if (path) |p| b.dupePath(p) else null;
}
/// Handy when you have many C/C++ source files and want them all to have the same flags.
pub fn addCSourceFiles(self: *CompileStep, files: []const []const u8, flags: []const []const u8) void {
- const c_source_files = self.builder.allocator.create(CSourceFiles) catch @panic("OOM");
+ const b = self.step.owner;
+ const c_source_files = b.allocator.create(CSourceFiles) catch @panic("OOM");
- const files_copy = self.builder.dupeStrings(files);
- const flags_copy = self.builder.dupeStrings(flags);
+ const files_copy = b.dupeStrings(files);
+ const flags_copy = b.dupeStrings(flags);
c_source_files.* = .{
.files = files_copy,
@@ -851,8 +889,9 @@ pub fn addCSourceFile(self: *CompileStep, file: []const u8, flags: []const []con
}
pub fn addCSourceFileSource(self: *CompileStep, source: CSourceFile) void {
- const c_source_file = self.builder.allocator.create(CSourceFile) catch @panic("OOM");
- c_source_file.* = source.dupe(self.builder);
+ const b = self.step.owner;
+ const c_source_file = b.allocator.create(CSourceFile) catch @panic("OOM");
+ c_source_file.* = source.dupe(b);
self.link_objects.append(.{ .c_source_file = c_source_file }) catch @panic("OOM");
source.source.addStepDependencies(&self.step);
}
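A short sketch of the C-source helpers above, with hypothetical file names and flags; the same flag slice is shared by every file passed to `addCSourceFiles`.

    exe.linkLibC();
    exe.addCSourceFiles(&.{
        "vendor/miniz/miniz.c",
        "vendor/miniz/miniz_zip.c",
    }, &.{ "-std=c99", "-fno-sanitize=undefined" });
    // A single file can carry its own flags:
    exe.addCSourceFile("src/glue.c", &.{"-Wall"});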
@@ -866,52 +905,61 @@ pub fn setVerboseCC(self: *CompileStep, value: bool) void {
}
pub fn overrideZigLibDir(self: *CompileStep, dir_path: []const u8) void {
- self.zig_lib_dir = self.builder.dupePath(dir_path);
+ const b = self.step.owner;
+ self.zig_lib_dir = b.dupePath(dir_path);
}
pub fn setMainPkgPath(self: *CompileStep, dir_path: []const u8) void {
- self.main_pkg_path = self.builder.dupePath(dir_path);
+ const b = self.step.owner;
+ self.main_pkg_path = b.dupePath(dir_path);
}
pub fn setLibCFile(self: *CompileStep, libc_file: ?FileSource) void {
- self.libc_file = if (libc_file) |f| f.dupe(self.builder) else null;
+ const b = self.step.owner;
+ self.libc_file = if (libc_file) |f| f.dupe(b) else null;
}
/// Returns the generated executable, library or object file.
/// To run an executable built with zig build, use `run`, or create an install step and invoke it.
pub fn getOutputSource(self: *CompileStep) FileSource {
- return FileSource{ .generated = &self.output_path_source };
+ return .{ .generated = &self.output_path_source };
+}
+
+pub fn getOutputDirectorySource(self: *CompileStep) FileSource {
+ return .{ .generated = &self.output_dirname_source };
}
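These generated-file accessors let other steps depend on the compile output without hard-coding cache paths. A hedged sketch follows; `b.addInstallFile`, the destination path, and the second artifact are assumptions for illustration, while the directory-source plumbing is the new API introduced in this change.

    // Install the produced binary under a custom name.
    const copy = b.addInstallFile(exe.getOutputSource(), "bin/app-copy");
    b.getInstallStep().dependOn(&copy.step);

    // Let another artifact search the whole output directory, pairing the new
    // getOutputDirectorySource with the new addLibraryPathDirectorySource.
    other_exe.addLibraryPathDirectorySource(lib.getOutputDirectorySource());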
/// Returns the generated import library. This function can only be called for libraries.
pub fn getOutputLibSource(self: *CompileStep) FileSource {
assert(self.kind == .lib);
- return FileSource{ .generated = &self.output_lib_path_source };
+ return .{ .generated = &self.output_lib_path_source };
}
/// Returns the generated header file.
/// This function can only be called for libraries or object files which have `emit_h` set.
pub fn getOutputHSource(self: *CompileStep) FileSource {
- assert(self.kind != .exe and self.kind != .test_exe and self.kind != .@"test");
+ assert(self.kind != .exe and self.kind != .@"test");
assert(self.emit_h);
- return FileSource{ .generated = &self.output_h_path_source };
+ return .{ .generated = &self.output_h_path_source };
}
/// Returns the generated PDB file. This function can only be called for Windows and UEFI.
pub fn getOutputPdbSource(self: *CompileStep) FileSource {
// TODO: Is this right? Isn't PDB for *any* PE/COFF file?
assert(self.target.isWindows() or self.target.isUefi());
- return FileSource{ .generated = &self.output_pdb_path_source };
+ return .{ .generated = &self.output_pdb_path_source };
}
pub fn addAssemblyFile(self: *CompileStep, path: []const u8) void {
+ const b = self.step.owner;
self.link_objects.append(.{
- .assembly_file = .{ .path = self.builder.dupe(path) },
+ .assembly_file = .{ .path = b.dupe(path) },
}) catch @panic("OOM");
}
pub fn addAssemblyFileSource(self: *CompileStep, source: FileSource) void {
- const source_duped = source.dupe(self.builder);
+ const b = self.step.owner;
+ const source_duped = source.dupe(b);
self.link_objects.append(.{ .assembly_file = source_duped }) catch @panic("OOM");
source_duped.addStepDependencies(&self.step);
}
@@ -921,7 +969,8 @@ pub fn addObjectFile(self: *CompileStep, source_file: []const u8) void {
}
pub fn addObjectFileSource(self: *CompileStep, source: FileSource) void {
- self.link_objects.append(.{ .static_path = source.dupe(self.builder) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.link_objects.append(.{ .static_path = source.dupe(b) }) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
@@ -936,11 +985,13 @@ pub const addLibPath = @compileError("deprecated, use addLibraryPath");
pub const addFrameworkDir = @compileError("deprecated, use addFrameworkPath");
pub fn addSystemIncludePath(self: *CompileStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path_system = self.builder.dupe(path) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.include_dirs.append(IncludeDir{ .raw_path_system = b.dupe(path) }) catch @panic("OOM");
}
pub fn addIncludePath(self: *CompileStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path = self.builder.dupe(path) }) catch @panic("OOM");
+ const b = self.step.owner;
+ self.include_dirs.append(IncludeDir{ .raw_path = b.dupe(path) }) catch @panic("OOM");
}
pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) void {
@@ -949,23 +1000,42 @@ pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) voi
}
pub fn addLibraryPath(self: *CompileStep, path: []const u8) void {
- self.lib_paths.append(self.builder.dupe(path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.lib_paths.append(.{ .path = b.dupe(path) }) catch @panic("OOM");
+}
+
+pub fn addLibraryPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.lib_paths.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
pub fn addRPath(self: *CompileStep, path: []const u8) void {
- self.rpaths.append(self.builder.dupe(path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.rpaths.append(.{ .path = b.dupe(path) }) catch @panic("OOM");
+}
+
+pub fn addRPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.rpaths.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
pub fn addFrameworkPath(self: *CompileStep, dir_path: []const u8) void {
- self.framework_dirs.append(self.builder.dupe(dir_path)) catch @panic("OOM");
+ const b = self.step.owner;
+ self.framework_dirs.append(.{ .path = b.dupe(dir_path) }) catch @panic("OOM");
+}
+
+pub fn addFrameworkPathDirectorySource(self: *CompileStep, directory_source: FileSource) void {
+ self.framework_dirs.append(directory_source) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
}
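A sketch of the path helpers above: the string-based variants still take plain paths, while the new `*DirectorySource` variants take a `FileSource` pointing at a generated directory. All paths below are hypothetical.

    exe.addIncludePath("vendor/include");
    exe.addSystemIncludePath("/opt/thirdparty/include");
    exe.addLibraryPath("vendor/lib");
    // On Darwin, @executable_path/@loader_path prefixes are passed through untouched
    // (see the rpath handling later in make()).
    exe.addRPath("@executable_path/../lib");
    exe.addFrameworkPath("/Library/Frameworks");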
/// Adds a module to be used with `@import` and exposing it in the current
/// package's module table using `name`.
pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
- cs.modules.put(cs.builder.dupe(name), module) catch @panic("OOM");
+ const b = cs.step.owner;
+ cs.modules.put(b.dupe(name), module) catch @panic("OOM");
- var done = std.AutoHashMap(*Module, void).init(cs.builder.allocator);
+ var done = std.AutoHashMap(*Module, void).init(b.allocator);
defer done.deinit();
cs.addRecursiveBuildDeps(module, &done) catch @panic("OOM");
}
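Module wiring sketch, using the `createModule` call referenced above; the module names, source paths, and the `source_file` option field are assumptions for illustration.

    const foo_mod = b.createModule(.{
        .source_file = .{ .path = "libs/foo/foo.zig" },
    });
    // Exposes the module as @import("foo") in exe's root package and registers
    // build dependencies of the module graph recursively.
    exe.addModule("foo", foo_mod);
    // Or create the module inline without registering it with b.addModule first:
    exe.addAnonymousModule("bar", .{ .source_file = .{ .path = "libs/bar/bar.zig" } });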
@@ -973,7 +1043,8 @@ pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
/// Adds a module to be used with `@import` without exposing it in the current
/// package's module table.
pub fn addAnonymousModule(cs: *CompileStep, name: []const u8, options: std.Build.CreateModuleOptions) void {
- const module = cs.builder.createModule(options);
+ const b = cs.step.owner;
+ const module = b.createModule(options);
return addModule(cs, name, module);
}
@@ -993,12 +1064,13 @@ fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module, done: *std.AutoHashM
/// If Vcpkg was found on the system, it will be added to include and lib
/// paths for the specified target.
pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
+ const b = self.step.owner;
// Ideally in the Unattempted case we would call the function recursively
// after findVcpkgRoot and have only one switch statement, but the compiler
// cannot resolve the error set.
- switch (self.builder.vcpkg_root) {
+ switch (b.vcpkg_root) {
.unattempted => {
- self.builder.vcpkg_root = if (try findVcpkgRoot(self.builder.allocator)) |root|
+ b.vcpkg_root = if (try findVcpkgRoot(b.allocator)) |root|
VcpkgRoot{ .found = root }
else
.not_found;
@@ -1007,31 +1079,32 @@ pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
.found => {},
}
- switch (self.builder.vcpkg_root) {
+ switch (b.vcpkg_root) {
.unattempted => unreachable,
.not_found => return error.VcpkgNotFound,
.found => |root| {
- const allocator = self.builder.allocator;
+ const allocator = b.allocator;
const triplet = try self.target.vcpkgTriplet(allocator, if (linkage == .static) .Static else .Dynamic);
- defer self.builder.allocator.free(triplet);
+ defer b.allocator.free(triplet);
- const include_path = self.builder.pathJoin(&.{ root, "installed", triplet, "include" });
+ const include_path = b.pathJoin(&.{ root, "installed", triplet, "include" });
errdefer allocator.free(include_path);
try self.include_dirs.append(IncludeDir{ .raw_path = include_path });
- const lib_path = self.builder.pathJoin(&.{ root, "installed", triplet, "lib" });
- try self.lib_paths.append(lib_path);
+ const lib_path = b.pathJoin(&.{ root, "installed", triplet, "lib" });
+ try self.lib_paths.append(.{ .path = lib_path });
- self.vcpkg_bin_path = self.builder.pathJoin(&.{ root, "installed", triplet, "bin" });
+ self.vcpkg_bin_path = b.pathJoin(&.{ root, "installed", triplet, "bin" });
},
}
}
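Usage sketch for the vcpkg integration above, assuming vcpkg is installed; the library name and the warn-and-continue error handling are illustrative choices, not part of this change.

    // Adds <vcpkg-root>/installed/<triplet>/include and .../lib for the target,
    // and records the matching bin/ directory for later use.
    exe.addVcpkgPaths(.dynamic) catch |err| {
        std.log.warn("vcpkg not usable: {s}", .{@errorName(err)});
    };
    exe.linkSystemLibrary("curl");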
pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void {
+ const b = self.step.owner;
assert(self.kind == .@"test");
- const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
+ const duped_args = b.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
for (args, 0..) |arg, i| {
- duped_args[i] = if (arg) |a| self.builder.dupe(a) else null;
+ duped_args[i] = if (arg) |a| b.dupe(a) else null;
}
self.exec_cmd_args = duped_args;
}
@@ -1040,22 +1113,27 @@ fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void {
self.step.dependOn(&other.step);
self.link_objects.append(.{ .other_step = other }) catch @panic("OOM");
self.include_dirs.append(.{ .other_step = other }) catch @panic("OOM");
+
+ for (other.installed_headers.items) |install_step| {
+ self.step.dependOn(install_step);
+ }
}
fn appendModuleArgs(
cs: *CompileStep,
zig_args: *ArrayList([]const u8),
) error{OutOfMemory}!void {
+ const b = cs.step.owner;
// First, traverse the whole dependency graph and give every module a unique name, ideally one
// named after what it's called somewhere in the graph. It will help here to have both a mapping
// from module to name and a set of all the currently-used names.
- var mod_names = std.AutoHashMap(*Module, []const u8).init(cs.builder.allocator);
- var names = std.StringHashMap(void).init(cs.builder.allocator);
+ var mod_names = std.AutoHashMap(*Module, []const u8).init(b.allocator);
+ var names = std.StringHashMap(void).init(b.allocator);
var to_name = std.ArrayList(struct {
name: []const u8,
mod: *Module,
- }).init(cs.builder.allocator);
+ }).init(b.allocator);
{
var it = cs.modules.iterator();
while (it.next()) |kv| {
@@ -1076,7 +1154,7 @@ fn appendModuleArgs(
if (mod_names.contains(dep.mod)) continue;
// We'll use this buffer to store the name we decide on
- var buf = try cs.builder.allocator.alloc(u8, dep.name.len + 32);
+ var buf = try b.allocator.alloc(u8, dep.name.len + 32);
// First, try just the exposed dependency name
std.mem.copy(u8, buf, dep.name);
var name = buf[0..dep.name.len];
@@ -1113,15 +1191,15 @@ fn appendModuleArgs(
const mod = kv.key_ptr.*;
const name = kv.value_ptr.*;
- const deps_str = try constructDepString(cs.builder.allocator, mod_names, mod.dependencies);
+ const deps_str = try constructDepString(b.allocator, mod_names, mod.dependencies);
const src = mod.builder.pathFromRoot(mod.source_file.getPath(mod.builder));
try zig_args.append("--mod");
- try zig_args.append(try std.fmt.allocPrint(cs.builder.allocator, "{s}:{s}:{s}", .{ name, deps_str, src }));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "{s}:{s}:{s}", .{ name, deps_str, src }));
}
}
// Lastly, output the root dependencies
- const deps_str = try constructDepString(cs.builder.allocator, mod_names, cs.modules);
+ const deps_str = try constructDepString(b.allocator, mod_names, cs.modules);
if (deps_str.len > 0) {
try zig_args.append("--deps");
try zig_args.append(deps_str);
@@ -1151,43 +1229,36 @@ fn constructDepString(
}
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(CompileStep, "step", step);
- const builder = self.builder;
if (self.root_src == null and self.link_objects.items.len == 0) {
- log.err("{s}: linker needs 1 or more objects to link", .{self.step.name});
- return error.NeedAnObject;
+ return step.fail("the linker needs one or more objects to link", .{});
}
- var zig_args = ArrayList([]const u8).init(builder.allocator);
+ var zig_args = ArrayList([]const u8).init(b.allocator);
defer zig_args.deinit();
- try zig_args.append(builder.zig_exe);
+ try zig_args.append(b.zig_exe);
const cmd = switch (self.kind) {
.lib => "build-lib",
.exe => "build-exe",
.obj => "build-obj",
.@"test" => "test",
- .test_exe => "test",
};
try zig_args.append(cmd);
- if (builder.color != .auto) {
- try zig_args.append("--color");
- try zig_args.append(@tagName(builder.color));
- }
-
- if (builder.reference_trace) |some| {
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "-freference-trace={d}", .{some}));
+ if (b.reference_trace) |some| {
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "-freference-trace={d}", .{some}));
}
try addFlag(&zig_args, "LLVM", self.use_llvm);
try addFlag(&zig_args, "LLD", self.use_lld);
if (self.target.ofmt) |ofmt| {
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "-ofmt={s}", .{@tagName(ofmt)}));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "-ofmt={s}", .{@tagName(ofmt)}));
}
if (self.entry_symbol_name) |entry| {
@@ -1197,18 +1268,18 @@ fn make(step: *Step) !void {
if (self.stack_size) |stack_size| {
try zig_args.append("--stack");
- try zig_args.append(try std.fmt.allocPrint(builder.allocator, "{}", .{stack_size}));
+ try zig_args.append(try std.fmt.allocPrint(b.allocator, "{}", .{stack_size}));
}
- if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder));
+ if (self.root_src) |root_src| try zig_args.append(root_src.getPath(b));
// We will add link objects from transitive dependencies, but we want to keep
// all link objects in the same order provided.
// This array is used to keep self.link_objects immutable.
var transitive_deps: TransitiveDeps = .{
- .link_objects = ArrayList(LinkObject).init(builder.allocator),
- .seen_system_libs = StringHashMap(void).init(builder.allocator),
- .seen_steps = std.AutoHashMap(*const Step, void).init(builder.allocator),
+ .link_objects = ArrayList(LinkObject).init(b.allocator),
+ .seen_system_libs = StringHashMap(void).init(b.allocator),
+ .seen_steps = std.AutoHashMap(*const Step, void).init(b.allocator),
.is_linking_libcpp = self.is_linking_libcpp,
.is_linking_libc = self.is_linking_libc,
.frameworks = &self.frameworks,
@@ -1221,14 +1292,13 @@ fn make(step: *Step) !void {
for (transitive_deps.link_objects.items) |link_object| {
switch (link_object) {
- .static_path => |static_path| try zig_args.append(static_path.getPath(builder)),
+ .static_path => |static_path| try zig_args.append(static_path.getPath(b)),
.other_step => |other| switch (other.kind) {
.exe => @panic("Cannot link with an executable build artifact"),
- .test_exe => @panic("Cannot link with an executable build artifact"),
.@"test" => @panic("Cannot link with a test"),
.obj => {
- try zig_args.append(other.getOutputSource().getPath(builder));
+ try zig_args.append(other.getOutputSource().getPath(b));
},
.lib => l: {
if (self.isStaticLibrary() and other.isStaticLibrary()) {
@@ -1236,7 +1306,7 @@ fn make(step: *Step) !void {
break :l;
}
- const full_path_lib = other.getOutputLibSource().getPath(builder);
+ const full_path_lib = other.getOutputLibSource().getPath(b);
try zig_args.append(full_path_lib);
if (other.linkage == Linkage.dynamic and !self.target.isWindows()) {
@@ -1251,14 +1321,11 @@ fn make(step: *Step) !void {
.system_lib => |system_lib| {
const prefix: []const u8 = prefix: {
if (system_lib.needed) break :prefix "-needed-l";
- if (system_lib.weak) {
- if (self.target.isDarwin()) break :prefix "-weak-l";
- log.warn("Weak library import used for a non-darwin target, this will be converted to normally library import `-lname`", .{});
- }
+ if (system_lib.weak) break :prefix "-weak-l";
break :prefix "-l";
};
switch (system_lib.use_pkg_config) {
- .no => try zig_args.append(builder.fmt("{s}{s}", .{ prefix, system_lib.name })),
+ .no => try zig_args.append(b.fmt("{s}{s}", .{ prefix, system_lib.name })),
.yes, .force => {
if (self.runPkgConfig(system_lib.name)) |args| {
try zig_args.appendSlice(args);
@@ -1272,7 +1339,7 @@ fn make(step: *Step) !void {
.yes => {
// pkg-config failed, so fall back to linking the library
// by name directly.
- try zig_args.append(builder.fmt("{s}{s}", .{
+ try zig_args.append(b.fmt("{s}{s}", .{
prefix,
system_lib.name,
}));
@@ -1295,7 +1362,7 @@ fn make(step: *Step) !void {
try zig_args.append("--");
prev_has_extra_flags = false;
}
- try zig_args.append(asm_file.getPath(builder));
+ try zig_args.append(asm_file.getPath(b));
},
.c_source_file => |c_source_file| {
@@ -1312,7 +1379,7 @@ fn make(step: *Step) !void {
}
try zig_args.append("--");
}
- try zig_args.append(c_source_file.source.getPath(builder));
+ try zig_args.append(c_source_file.source.getPath(b));
},
.c_source_files => |c_source_files| {
@@ -1330,7 +1397,7 @@ fn make(step: *Step) !void {
try zig_args.append("--");
}
for (c_source_files.files) |file| {
- try zig_args.append(builder.pathFromRoot(file));
+ try zig_args.append(b.pathFromRoot(file));
}
},
}
@@ -1346,7 +1413,7 @@ fn make(step: *Step) !void {
if (self.image_base) |image_base| {
try zig_args.append("--image-base");
- try zig_args.append(builder.fmt("0x{x}", .{image_base}));
+ try zig_args.append(b.fmt("0x{x}", .{image_base}));
}
if (self.filter) |filter| {
@@ -1358,39 +1425,35 @@ fn make(step: *Step) !void {
try zig_args.append("--test-evented-io");
}
- if (self.name_prefix.len != 0) {
- try zig_args.append("--test-name-prefix");
- try zig_args.append(self.name_prefix);
- }
-
if (self.test_runner) |test_runner| {
try zig_args.append("--test-runner");
- try zig_args.append(builder.pathFromRoot(test_runner));
+ try zig_args.append(b.pathFromRoot(test_runner));
}
- for (builder.debug_log_scopes) |log_scope| {
+ for (b.debug_log_scopes) |log_scope| {
try zig_args.append("--debug-log");
try zig_args.append(log_scope);
}
- if (builder.debug_compile_errors) {
+ if (b.debug_compile_errors) {
try zig_args.append("--debug-compile-errors");
}
- if (builder.verbose_cimport) try zig_args.append("--verbose-cimport");
- if (builder.verbose_air) try zig_args.append("--verbose-air");
- if (builder.verbose_llvm_ir) try zig_args.append("--verbose-llvm-ir");
- if (builder.verbose_link or self.verbose_link) try zig_args.append("--verbose-link");
- if (builder.verbose_cc or self.verbose_cc) try zig_args.append("--verbose-cc");
- if (builder.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
+ if (b.verbose_cimport) try zig_args.append("--verbose-cimport");
+ if (b.verbose_air) try zig_args.append("--verbose-air");
+ if (b.verbose_llvm_ir) |path| try zig_args.append(b.fmt("--verbose-llvm-ir={s}", .{path}));
+ if (b.verbose_llvm_bc) |path| try zig_args.append(b.fmt("--verbose-llvm-bc={s}", .{path}));
+ if (b.verbose_link or self.verbose_link) try zig_args.append("--verbose-link");
+ if (b.verbose_cc or self.verbose_cc) try zig_args.append("--verbose-cc");
+ if (b.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
- if (self.emit_analysis.getArg(builder, "emit-analysis")) |arg| try zig_args.append(arg);
- if (self.emit_asm.getArg(builder, "emit-asm")) |arg| try zig_args.append(arg);
- if (self.emit_bin.getArg(builder, "emit-bin")) |arg| try zig_args.append(arg);
- if (self.emit_docs.getArg(builder, "emit-docs")) |arg| try zig_args.append(arg);
- if (self.emit_implib.getArg(builder, "emit-implib")) |arg| try zig_args.append(arg);
- if (self.emit_llvm_bc.getArg(builder, "emit-llvm-bc")) |arg| try zig_args.append(arg);
- if (self.emit_llvm_ir.getArg(builder, "emit-llvm-ir")) |arg| try zig_args.append(arg);
+ if (self.emit_analysis.getArg(b, "emit-analysis")) |arg| try zig_args.append(arg);
+ if (self.emit_asm.getArg(b, "emit-asm")) |arg| try zig_args.append(arg);
+ if (self.emit_bin.getArg(b, "emit-bin")) |arg| try zig_args.append(arg);
+ if (self.emit_docs.getArg(b, "emit-docs")) |arg| try zig_args.append(arg);
+ if (self.emit_implib.getArg(b, "emit-implib")) |arg| try zig_args.append(arg);
+ if (self.emit_llvm_bc.getArg(b, "emit-llvm-bc")) |arg| try zig_args.append(arg);
+ if (self.emit_llvm_ir.getArg(b, "emit-llvm-ir")) |arg| try zig_args.append(arg);
if (self.emit_h) try zig_args.append("-femit-h");
@@ -1414,6 +1477,9 @@ fn make(step: *Step) !void {
if (self.link_gc_sections) |x| {
try zig_args.append(if (x) "--gc-sections" else "--no-gc-sections");
}
+ if (!self.linker_dynamicbase) {
+ try zig_args.append("--no-dynamicbase");
+ }
if (self.linker_allow_shlib_undefined) |x| {
try zig_args.append(if (x) "-fallow-shlib-undefined" else "-fno-allow-shlib-undefined");
}
@@ -1431,31 +1497,31 @@ fn make(step: *Step) !void {
}
if (self.link_z_common_page_size) |size| {
try zig_args.append("-z");
- try zig_args.append(builder.fmt("common-page-size={d}", .{size}));
+ try zig_args.append(b.fmt("common-page-size={d}", .{size}));
}
if (self.link_z_max_page_size) |size| {
try zig_args.append("-z");
- try zig_args.append(builder.fmt("max-page-size={d}", .{size}));
+ try zig_args.append(b.fmt("max-page-size={d}", .{size}));
}
if (self.libc_file) |libc_file| {
try zig_args.append("--libc");
- try zig_args.append(libc_file.getPath(builder));
- } else if (builder.libc_file) |libc_file| {
+ try zig_args.append(libc_file.getPath(b));
+ } else if (b.libc_file) |libc_file| {
try zig_args.append("--libc");
try zig_args.append(libc_file);
}
switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
- else => try zig_args.append(builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ else => try zig_args.append(b.fmt("-O{s}", .{@tagName(self.optimize)})),
}
try zig_args.append("--cache-dir");
- try zig_args.append(builder.cache_root.path orelse ".");
+ try zig_args.append(b.cache_root.path orelse ".");
try zig_args.append("--global-cache-dir");
- try zig_args.append(builder.global_cache_root.path orelse ".");
+ try zig_args.append(b.global_cache_root.path orelse ".");
try zig_args.append("--name");
try zig_args.append(self.name);
@@ -1467,11 +1533,11 @@ fn make(step: *Step) !void {
if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic) {
if (self.version) |version| {
try zig_args.append("--version");
- try zig_args.append(builder.fmt("{}", .{version}));
+ try zig_args.append(b.fmt("{}", .{version}));
}
if (self.target.isDarwin()) {
- const install_name = self.install_name orelse builder.fmt("@rpath/{s}{s}{s}", .{
+ const install_name = self.install_name orelse b.fmt("@rpath/{s}{s}{s}", .{
self.target.libPrefix(),
self.name,
self.target.dynamicLibSuffix(),
@@ -1485,7 +1551,7 @@ fn make(step: *Step) !void {
try zig_args.appendSlice(&[_][]const u8{ "--entitlements", entitlements });
}
if (self.pagezero_size) |pagezero_size| {
- const size = try std.fmt.allocPrint(builder.allocator, "{x}", .{pagezero_size});
+ const size = try std.fmt.allocPrint(b.allocator, "{x}", .{pagezero_size});
try zig_args.appendSlice(&[_][]const u8{ "-pagezero_size", size });
}
if (self.search_strategy) |strat| switch (strat) {
@@ -1493,7 +1559,7 @@ fn make(step: *Step) !void {
.dylibs_first => try zig_args.append("-search_dylibs_first"),
};
if (self.headerpad_size) |headerpad_size| {
- const size = try std.fmt.allocPrint(builder.allocator, "{x}", .{headerpad_size});
+ const size = try std.fmt.allocPrint(b.allocator, "{x}", .{headerpad_size});
try zig_args.appendSlice(&[_][]const u8{ "-headerpad", size });
}
if (self.headerpad_max_install_names) {
@@ -1541,16 +1607,16 @@ fn make(step: *Step) !void {
try zig_args.append("--export-table");
}
if (self.initial_memory) |initial_memory| {
- try zig_args.append(builder.fmt("--initial-memory={d}", .{initial_memory}));
+ try zig_args.append(b.fmt("--initial-memory={d}", .{initial_memory}));
}
if (self.max_memory) |max_memory| {
- try zig_args.append(builder.fmt("--max-memory={d}", .{max_memory}));
+ try zig_args.append(b.fmt("--max-memory={d}", .{max_memory}));
}
if (self.shared_memory) {
try zig_args.append("--shared-memory");
}
if (self.global_base) |global_base| {
- try zig_args.append(builder.fmt("--global-base={d}", .{global_base}));
+ try zig_args.append(b.fmt("--global-base={d}", .{global_base}));
}
if (self.code_model != .default) {
@@ -1558,16 +1624,16 @@ fn make(step: *Step) !void {
try zig_args.append(@tagName(self.code_model));
}
if (self.wasi_exec_model) |model| {
- try zig_args.append(builder.fmt("-mexec-model={s}", .{@tagName(model)}));
+ try zig_args.append(b.fmt("-mexec-model={s}", .{@tagName(model)}));
}
for (self.export_symbol_names) |symbol_name| {
- try zig_args.append(builder.fmt("--export={s}", .{symbol_name}));
+ try zig_args.append(b.fmt("--export={s}", .{symbol_name}));
}
if (!self.target.isNative()) {
try zig_args.appendSlice(&.{
- "-target", try self.target.zigTriple(builder.allocator),
- "-mcpu", try std.Build.serializeCpu(builder.allocator, self.target.getCpu()),
+ "-target", try self.target.zigTriple(b.allocator),
+ "-mcpu", try std.Build.serializeCpu(b.allocator, self.target.getCpu()),
});
if (self.target.dynamic_linker.get()) |dynamic_linker| {
@@ -1578,12 +1644,12 @@ fn make(step: *Step) !void {
if (self.linker_script) |linker_script| {
try zig_args.append("--script");
- try zig_args.append(linker_script.getPath(builder));
+ try zig_args.append(linker_script.getPath(b));
}
if (self.version_script) |version_script| {
try zig_args.append("--version-script");
- try zig_args.append(builder.pathFromRoot(version_script));
+ try zig_args.append(b.pathFromRoot(version_script));
}
if (self.kind == .@"test") {
@@ -1596,83 +1662,7 @@ fn make(step: *Step) !void {
try zig_args.append("--test-cmd-bin");
}
}
- } else {
- const need_cross_glibc = self.target.isGnuLibC() and transitive_deps.is_linking_libc;
-
- switch (builder.host.getExternalExecutor(self.target_info, .{
- .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
- .link_libc = transitive_deps.is_linking_libc,
- })) {
- .native => {},
- .bad_dl, .bad_os_or_cpu => {
- try zig_args.append("--test-no-exec");
- },
- .rosetta => if (builder.enable_rosetta) {
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .qemu => |bin_name| ok: {
- if (builder.enable_qemu) qemu: {
- const glibc_dir_arg = if (need_cross_glibc)
- builder.glibc_runtimes_dir orelse break :qemu
- else
- null;
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- if (glibc_dir_arg) |dir| {
- // TODO look into making this a call to `linuxTriple`. This
- // needs the directory to be called "i686" rather than
- // "x86" which is why we do it manually here.
- const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
- const cpu_arch = self.target.getCpuArch();
- const os_tag = self.target.getOsTag();
- const abi = self.target.getAbi();
- const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
- "i686"
- else
- @tagName(cpu_arch);
- const full_dir = try std.fmt.allocPrint(builder.allocator, fmt_str, .{
- dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
- });
-
- try zig_args.append("--test-cmd");
- try zig_args.append("-L");
- try zig_args.append("--test-cmd");
- try zig_args.append(full_dir);
- }
- try zig_args.append("--test-cmd-bin");
- break :ok;
- }
- try zig_args.append("--test-no-exec");
- },
- .wine => |bin_name| if (builder.enable_wine) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .wasmtime => |bin_name| if (builder.enable_wasmtime) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd");
- try zig_args.append("--dir=.");
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- .darling => |bin_name| if (builder.enable_darling) {
- try zig_args.append("--test-cmd");
- try zig_args.append(bin_name);
- try zig_args.append("--test-cmd-bin");
- } else {
- try zig_args.append("--test-no-exec");
- },
- }
}
- } else if (self.kind == .test_exe) {
- try zig_args.append("--test-no-exec");
}
try self.appendModuleArgs(&zig_args);
@@ -1681,18 +1671,18 @@ fn make(step: *Step) !void {
switch (include_dir) {
.raw_path => |include_path| {
try zig_args.append("-I");
- try zig_args.append(builder.pathFromRoot(include_path));
+ try zig_args.append(b.pathFromRoot(include_path));
},
.raw_path_system => |include_path| {
- if (builder.sysroot != null) {
+ if (b.sysroot != null) {
try zig_args.append("-iwithsysroot");
} else {
try zig_args.append("-isystem");
}
- const resolved_include_path = builder.pathFromRoot(include_path);
+ const resolved_include_path = b.pathFromRoot(include_path);
- const common_include_path = if (builtin.os.tag == .windows and builder.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
+ const common_include_path = if (builtin.os.tag == .windows and b.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
// We need to check for disk designator and strip it out from dir path so
// that zig/clang can concat resolved_include_path with sysroot.
const disk_designator = fs.path.diskDesignatorWindows(resolved_include_path);
@@ -1708,17 +1698,14 @@ fn make(step: *Step) !void {
},
.other_step => |other| {
if (other.emit_h) {
- const h_path = other.getOutputHSource().getPath(builder);
+ const h_path = other.getOutputHSource().getPath(b);
try zig_args.append("-isystem");
try zig_args.append(fs.path.dirname(h_path).?);
}
if (other.installed_headers.items.len > 0) {
- for (other.installed_headers.items) |install_step| {
- try install_step.make();
- }
try zig_args.append("-I");
- try zig_args.append(builder.pathJoin(&.{
- other.builder.install_prefix, "include",
+ try zig_args.append(b.pathJoin(&.{
+ other.step.owner.install_prefix, "include",
}));
}
},
@@ -1730,33 +1717,51 @@ fn make(step: *Step) !void {
}
}
+ for (self.c_macros.items) |c_macro| {
+ try zig_args.append("-D");
+ try zig_args.append(c_macro);
+ }
+
+ try zig_args.ensureUnusedCapacity(2 * self.lib_paths.items.len);
for (self.lib_paths.items) |lib_path| {
- try zig_args.append("-L");
- try zig_args.append(lib_path);
+ zig_args.appendAssumeCapacity("-L");
+ zig_args.appendAssumeCapacity(lib_path.getPath2(b, step));
}
+ try zig_args.ensureUnusedCapacity(2 * self.rpaths.items.len);
for (self.rpaths.items) |rpath| {
- try zig_args.append("-rpath");
- try zig_args.append(rpath);
- }
+ zig_args.appendAssumeCapacity("-rpath");
+
+ if (self.target_info.target.isDarwin()) switch (rpath) {
+ .path => |path| {
+ // On Darwin, we should not try to expand special runtime paths such as
+ // * @executable_path
+ // * @loader_path
+ if (mem.startsWith(u8, path, "@executable_path") or
+ mem.startsWith(u8, path, "@loader_path"))
+ {
+ zig_args.appendAssumeCapacity(path);
+ continue;
+ }
+ },
+ .generated => {},
+ };
- for (self.c_macros.items) |c_macro| {
- try zig_args.append("-D");
- try zig_args.append(c_macro);
+ zig_args.appendAssumeCapacity(rpath.getPath2(b, step));
}
- if (self.target.isDarwin()) {
- for (self.framework_dirs.items) |dir| {
- if (builder.sysroot != null) {
- try zig_args.append("-iframeworkwithsysroot");
- } else {
- try zig_args.append("-iframework");
- }
- try zig_args.append(dir);
- try zig_args.append("-F");
- try zig_args.append(dir);
+ for (self.framework_dirs.items) |directory_source| {
+ if (b.sysroot != null) {
+ try zig_args.append("-iframeworkwithsysroot");
+ } else {
+ try zig_args.append("-iframework");
}
+ try zig_args.append(directory_source.getPath2(b, step));
+ try zig_args.append("-F");
+ try zig_args.append(directory_source.getPath2(b, step));
+ }
+ {
var it = self.frameworks.iterator();
while (it.next()) |entry| {
const name = entry.key_ptr.*;
@@ -1770,29 +1775,45 @@ fn make(step: *Step) !void {
}
try zig_args.append(name);
}
- } else {
- if (self.framework_dirs.items.len > 0) {
- log.info("Framework directories have been added for a non-darwin target, this will have no affect on the build", .{});
- }
-
- if (self.frameworks.count() > 0) {
- log.info("Frameworks have been added for a non-darwin target, this will have no affect on the build", .{});
- }
}
- if (builder.sysroot) |sysroot| {
+ if (b.sysroot) |sysroot| {
try zig_args.appendSlice(&[_][]const u8{ "--sysroot", sysroot });
}
- for (builder.search_prefixes.items) |search_prefix| {
- try zig_args.append("-L");
- try zig_args.append(builder.pathJoin(&.{
- search_prefix, "lib",
- }));
- try zig_args.append("-I");
- try zig_args.append(builder.pathJoin(&.{
- search_prefix, "include",
- }));
+ for (b.search_prefixes.items) |search_prefix| {
+ var prefix_dir = fs.cwd().openDir(search_prefix, .{}) catch |err| {
+ return step.fail("unable to open prefix directory '{s}': {s}", .{
+ search_prefix, @errorName(err),
+ });
+ };
+ defer prefix_dir.close();
+
+ // Avoid passing -L and -I flags for nonexistent directories.
+ // This prevents a warning (which should probably be upgraded to an error in Zig's
+ // CLI parsing code) when the linker sees an -L directory that does not exist.
+
+ if (prefix_dir.accessZ("lib", .{})) |_| {
+ try zig_args.appendSlice(&.{
+ "-L", try fs.path.join(b.allocator, &.{ search_prefix, "lib" }),
+ });
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return step.fail("unable to access '{s}/lib' directory: {s}", .{
+ search_prefix, @errorName(e),
+ }),
+ }
+
+ if (prefix_dir.accessZ("include", .{})) |_| {
+ try zig_args.appendSlice(&.{
+ "-I", try fs.path.join(b.allocator, &.{ search_prefix, "include" }),
+ });
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return step.fail("unable to access '{s}/include' directory: {s}", .{
+ search_prefix, @errorName(e),
+ }),
+ }
}
try addFlag(&zig_args, "valgrind", self.valgrind_support);
@@ -1801,15 +1822,15 @@ fn make(step: *Step) !void {
if (self.zig_lib_dir) |dir| {
try zig_args.append("--zig-lib-dir");
- try zig_args.append(builder.pathFromRoot(dir));
- } else if (builder.zig_lib_dir) |dir| {
+ try zig_args.append(b.pathFromRoot(dir));
+ } else if (b.zig_lib_dir) |dir| {
try zig_args.append("--zig-lib-dir");
try zig_args.append(dir);
}
if (self.main_pkg_path) |dir| {
try zig_args.append("--main-pkg-path");
- try zig_args.append(builder.pathFromRoot(dir));
+ try zig_args.append(b.pathFromRoot(dir));
}
try addFlag(&zig_args, "PIC", self.force_pic);
@@ -1831,6 +1852,7 @@ fn make(step: *Step) !void {
}
try zig_args.append("--enable-cache");
+ try zig_args.append("--listen=-");
// Windows has an argument length limit of 32,766 characters, macOS 262,144 and Linux
// 2,097,152. If our args exceed 30 KiB, we instead write them to a "response file" and
@@ -1841,15 +1863,15 @@ fn make(step: *Step) !void {
args_length += arg.len + 1; // +1 to account for null terminator
}
if (args_length >= 30 * 1024) {
- try builder.cache_root.handle.makePath("args");
+ try b.cache_root.handle.makePath("args");
const args_to_escape = zig_args.items[2..];
- var escaped_args = try ArrayList([]const u8).initCapacity(builder.allocator, args_to_escape.len);
+ var escaped_args = try ArrayList([]const u8).initCapacity(b.allocator, args_to_escape.len);
arg_blk: for (args_to_escape) |arg| {
for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
- var escaped = try ArrayList(u8).initCapacity(builder.allocator, arg.len + 1);
+ var escaped = try ArrayList(u8).initCapacity(b.allocator, arg.len + 1);
const writer = escaped.writer();
try writer.writeAll(arg[0..arg_idx]);
for (arg[arg_idx..]) |to_escape| {
@@ -1865,8 +1887,8 @@ fn make(step: *Step) !void {
// Write the args to zig-cache/args/<SHA256 hash of args> to avoid conflicts with
// other zig build commands running in parallel.
- const partially_quoted = try std.mem.join(builder.allocator, "\" \"", escaped_args.items);
- const args = try std.mem.concat(builder.allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
+ const partially_quoted = try std.mem.join(b.allocator, "\" \"", escaped_args.items);
+ const args = try std.mem.concat(b.allocator, u8, &[_][]const u8{ "\"", partially_quoted, "\"" });
var args_hash: [Sha256.digest_length]u8 = undefined;
Sha256.hash(args, &args_hash, .{});
@@ -1878,28 +1900,35 @@ fn make(step: *Step) !void {
);
const args_file = "args" ++ fs.path.sep_str ++ args_hex_hash;
- try builder.cache_root.handle.writeFile(args_file, args);
+ try b.cache_root.handle.writeFile(args_file, args);
- const resolved_args_file = try mem.concat(builder.allocator, u8, &.{
+ const resolved_args_file = try mem.concat(b.allocator, u8, &.{
"@",
- try builder.cache_root.join(builder.allocator, &.{args_file}),
+ try b.cache_root.join(b.allocator, &.{args_file}),
});
zig_args.shrinkRetainingCapacity(2);
try zig_args.append(resolved_args_file);
}
- const output_dir_nl = try builder.execFromStep(zig_args.items, &self.step);
- const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n");
+ const output_bin_path = step.evalZigProcess(zig_args.items, prog_node) catch |err| switch (err) {
+ error.NeedCompileErrorCheck => {
+ assert(self.expect_errors.len != 0);
+ try checkCompileErrors(self);
+ return;
+ },
+ else => |e| return e,
+ };
+ const build_output_dir = fs.path.dirname(output_bin_path).?;
if (self.output_dir) |output_dir| {
- var src_dir = try std.fs.cwd().openIterableDir(build_output_dir, .{});
+ var src_dir = try fs.cwd().openIterableDir(build_output_dir, .{});
defer src_dir.close();
// Create the output directory if it doesn't exist.
- try std.fs.cwd().makePath(output_dir);
+ try fs.cwd().makePath(output_dir);
- var dest_dir = try std.fs.cwd().openDir(output_dir, .{});
+ var dest_dir = try fs.cwd().openDir(output_dir, .{});
defer dest_dir.close();
var it = src_dir.iterate();
@@ -1923,25 +1952,34 @@ fn make(step: *Step) !void {
// Update generated files
if (self.output_dir != null) {
- self.output_path_source.path = builder.pathJoin(
+ self.output_dirname_source.path = self.output_dir.?;
+
+ self.output_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_filename },
);
if (self.emit_h) {
- self.output_h_path_source.path = builder.pathJoin(
+ self.output_h_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_h_filename },
);
}
if (self.target.isWindows() or self.target.isUefi()) {
- self.output_pdb_path_source.path = builder.pathJoin(
+ self.output_pdb_path_source.path = b.pathJoin(
&.{ self.output_dir.?, self.out_pdb_filename },
);
}
}
- if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic and self.version != null and self.target.wantSharedLibSymLinks()) {
- try doAtomicSymLinks(builder.allocator, self.getOutputSource().getPath(builder), self.major_only_filename.?, self.name_only_filename.?);
+ if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic and
+ self.version != null and self.target.wantSharedLibSymLinks())
+ {
+ try doAtomicSymLinks(
+ step,
+ self.getOutputSource().getPath(b),
+ self.major_only_filename.?,
+ self.name_only_filename.?,
+ );
}
}
@@ -1983,30 +2021,27 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
}
pub fn doAtomicSymLinks(
- allocator: Allocator,
+ step: *Step,
output_path: []const u8,
filename_major_only: []const u8,
filename_name_only: []const u8,
) !void {
+ const arena = step.owner.allocator;
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
- const major_only_path = try fs.path.join(
- allocator,
- &[_][]const u8{ out_dir, filename_major_only },
- );
- fs.atomicSymLink(allocator, out_basename, major_only_path) catch |err| {
- log.err("Unable to symlink {s} -> {s}", .{ major_only_path, out_basename });
- return err;
+ const major_only_path = try fs.path.join(arena, &.{ out_dir, filename_major_only });
+ fs.atomicSymLink(arena, out_basename, major_only_path) catch |err| {
+ return step.fail("unable to symlink {s} -> {s}: {s}", .{
+ major_only_path, out_basename, @errorName(err),
+ });
};
// sym link for libfoo.so to libfoo.so.1
- const name_only_path = try fs.path.join(
- allocator,
- &[_][]const u8{ out_dir, filename_name_only },
- );
- fs.atomicSymLink(allocator, filename_major_only, name_only_path) catch |err| {
- log.err("Unable to symlink {s} -> {s}", .{ name_only_path, filename_major_only });
- return err;
+ const name_only_path = try fs.path.join(arena, &.{ out_dir, filename_name_only });
+ fs.atomicSymLink(arena, filename_major_only, name_only_path) catch |err| {
+ return step.fail("Unable to symlink {s} -> {s}: {s}", .{
+ name_only_path, filename_major_only, @errorName(err),
+ });
};
}
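A worked example of what this helper produces, using hypothetical file names that mirror the comments in the body:

    try doAtomicSymLinks(step, "zig-out/lib/libfoo.so.1.2.3", "libfoo.so.1", "libfoo.so");
    // Resulting links, created next to the output file:
    //   libfoo.so.1 -> libfoo.so.1.2.3
    //   libfoo.so   -> libfoo.so.1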
@@ -2042,7 +2077,6 @@ fn getPkgConfigList(self: *std.Build) ![]const PkgConfigPkg {
error.FileNotFound => error.PkgConfigNotInstalled,
error.InvalidName => error.PkgConfigNotInstalled,
error.PkgConfigInvalidOutput => error.PkgConfigInvalidOutput,
- error.ChildExecFailed => error.PkgConfigFailed,
else => return err,
};
self.pkg_config_pkg_list = result;
@@ -2119,3 +2153,57 @@ const TransitiveDeps = struct {
}
}
};
+
+fn checkCompileErrors(self: *CompileStep) !void {
+ // Clear this field so that it does not get printed by the build runner.
+ const actual_eb = self.step.result_error_bundle;
+ self.step.result_error_bundle = std.zig.ErrorBundle.empty;
+
+ const arena = self.step.owner.allocator;
+
+ var actual_stderr_list = std.ArrayList(u8).init(arena);
+ try actual_eb.renderToWriter(.{
+ .ttyconf = .no_color,
+ .include_reference_trace = false,
+ .include_source_line = false,
+ }, actual_stderr_list.writer());
+ const actual_stderr = try actual_stderr_list.toOwnedSlice();
+
+ // Render the expected lines into a string that we can compare verbatim.
+ var expected_generated = std.ArrayList(u8).init(arena);
+
+ var actual_line_it = mem.split(u8, actual_stderr, "\n");
+ for (self.expect_errors) |expect_line| {
+ const actual_line = actual_line_it.next() orelse {
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ continue;
+ };
+ if (mem.endsWith(u8, actual_line, expect_line)) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ if (mem.startsWith(u8, expect_line, ":?:?: ")) {
+ if (mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) {
+ try expected_generated.appendSlice(actual_line);
+ try expected_generated.append('\n');
+ continue;
+ }
+ }
+ try expected_generated.appendSlice(expect_line);
+ try expected_generated.append('\n');
+ }
+
+ if (mem.eql(u8, expected_generated.items, actual_stderr)) return;
+
+ // TODO merge this with the testing.expectEqualStrings logic, and also CheckFile
+ return self.step.fail(
+ \\
+ \\========= expected: =====================
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\=========================================
+ , .{ expected_generated.items, actual_stderr });
+}
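Each expected line must match the end of the corresponding rendered stderr line, and a leading ":?:?: " acts as a wildcard for the file:line:column prefix. A hedged sketch of how a build script might populate `expect_errors`; the direct field assignment, test source path, and message text are assumptions for illustration.

    const case = b.addTest(.{
        .root_source_file = .{ .path = "test/compile_errors.zig" },
    });
    case.expect_errors = &.{
        // The ":?:?: " prefix matches any source location.
        ":?:?: error: expected type 'u32', found 'bool'",
    };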
diff --git a/lib/std/Build/ConfigHeaderStep.zig b/lib/std/Build/ConfigHeaderStep.zig
index 595c1018f7..c1849b410e 100644
--- a/lib/std/Build/ConfigHeaderStep.zig
+++ b/lib/std/Build/ConfigHeaderStep.zig
@@ -1,9 +1,3 @@
-const std = @import("../std.zig");
-const ConfigHeaderStep = @This();
-const Step = std.Build.Step;
-
-pub const base_id: Step.Id = .config_header;
-
pub const Style = union(enum) {
/// The configure format supported by autotools. It uses `#undef foo` to
/// mark lines that can be substituted with different values.
@@ -34,7 +28,6 @@ pub const Value = union(enum) {
};
step: Step,
-builder: *std.Build,
values: std.StringArrayHashMap(Value),
output_file: std.Build.GeneratedFile,
@@ -42,43 +35,57 @@ style: Style,
max_bytes: usize,
include_path: []const u8,
+pub const base_id: Step.Id = .config_header;
+
pub const Options = struct {
style: Style = .blank,
max_bytes: usize = 2 * 1024 * 1024,
include_path: ?[]const u8 = null,
+ first_ret_addr: ?usize = null,
};
-pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
- const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
- const name = if (options.style.getFileSource()) |s|
- builder.fmt("configure {s} header {s}", .{ @tagName(options.style), s.getDisplayName() })
- else
- builder.fmt("configure {s} header", .{@tagName(options.style)});
- self.* = .{
- .builder = builder,
- .step = Step.init(base_id, name, builder.allocator, make),
- .style = options.style,
- .values = std.StringArrayHashMap(Value).init(builder.allocator),
+pub fn create(owner: *std.Build, options: Options) *ConfigHeaderStep {
+ const self = owner.allocator.create(ConfigHeaderStep) catch @panic("OOM");
- .max_bytes = options.max_bytes,
- .include_path = "config.h",
- .output_file = .{ .step = &self.step },
- };
+ var include_path: []const u8 = "config.h";
if (options.style.getFileSource()) |s| switch (s) {
.path => |p| {
const basename = std.fs.path.basename(p);
if (std.mem.endsWith(u8, basename, ".h.in")) {
- self.include_path = basename[0 .. basename.len - 3];
+ include_path = basename[0 .. basename.len - 3];
}
},
else => {},
};
- if (options.include_path) |include_path| {
- self.include_path = include_path;
+ if (options.include_path) |p| {
+ include_path = p;
}
+ const name = if (options.style.getFileSource()) |s|
+ owner.fmt("configure {s} header {s} to {s}", .{
+ @tagName(options.style), s.getDisplayName(), include_path,
+ })
+ else
+ owner.fmt("configure {s} header to {s}", .{ @tagName(options.style), include_path });
+
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ .first_ret_addr = options.first_ret_addr orelse @returnAddress(),
+ }),
+ .style = options.style,
+ .values = std.StringArrayHashMap(Value).init(owner.allocator),
+
+ .max_bytes = options.max_bytes,
+ .include_path = include_path,
+ .output_file = .{ .step = &self.step },
+ };
+
return self;
}
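A usage sketch, assuming the std.Build.addConfigHeader convenience wrapper around this create function; the input template, value names, and install-style include path are hypothetical.

    const config_h = b.addConfigHeader(.{
        .style = .{ .cmake = .{ .path = "config.h.in" } },
        .include_path = "mylib/config.h",
    }, .{
        .HAVE_THREADS = true,
        .VERSION = "1.2.3",
    });
    // An input line such as `#cmakedefine HAVE_THREADS` is substituted in the
    // rendered header; linking it to an artifact adds the -I flag and dependency:
    exe.addConfigHeader(config_h);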
@@ -146,26 +153,20 @@ fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v
}
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const self = @fieldParentPtr(ConfigHeaderStep, "step", step);
- const gpa = self.builder.allocator;
-
- // The cache is used here not really as a way to speed things up - because writing
- // the data to a file would probably be very fast - but as a way to find a canonical
- // location to put build artifacts.
+ const gpa = b.allocator;
+ const arena = b.allocator;
- // If, for example, a hard-coded path was used as the location to put ConfigHeaderStep
- // files, then two ConfigHeaderStep executing in parallel might clobber each other.
+ var man = b.cache.obtain();
+ defer man.deinit();
- // TODO port the cache system from the compiler to zig std lib. Until then
- // we construct the path directly, and no "cache hit" detection happens;
- // the files are always written.
- // Note there is very similar code over in WriteFileStep
- const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
// Random bytes to make ConfigHeaderStep unique. Refresh this with new
// random bytes when ConfigHeaderStep implementation is modified in a
// non-backwards-compatible way.
- var hash = Hasher.init("PGuDTpidxyMqnkGM");
+ man.hash.add(@as(u32, 0xdef08d23));
var output = std.ArrayList(u8).init(gpa);
defer output.deinit();
@@ -177,15 +178,15 @@ fn make(step: *Step) !void {
switch (self.style) {
.autoconf => |file_source| {
try output.appendSlice(c_generated_line);
- const src_path = file_source.getPath(self.builder);
- const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
- try render_autoconf(contents, &output, self.values, src_path);
+ const src_path = file_source.getPath(b);
+ const contents = try std.fs.cwd().readFileAlloc(arena, src_path, self.max_bytes);
+ try render_autoconf(step, contents, &output, self.values, src_path);
},
.cmake => |file_source| {
try output.appendSlice(c_generated_line);
- const src_path = file_source.getPath(self.builder);
- const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
- try render_cmake(contents, &output, self.values, src_path);
+ const src_path = file_source.getPath(b);
+ const contents = try std.fs.cwd().readFileAlloc(arena, src_path, self.max_bytes);
+ try render_cmake(step, contents, &output, self.values, src_path);
},
.blank => {
try output.appendSlice(c_generated_line);
@@ -197,43 +198,44 @@ fn make(step: *Step) !void {
},
}
- hash.update(output.items);
+ man.hash.addBytes(output.items);
- var digest: [16]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [digest.len * 2]u8 = undefined;
- _ = std.fmt.bufPrint(
- &hash_basename,
- "{s}",
- .{std.fmt.fmtSliceHexLower(&digest)},
- ) catch unreachable;
+ if (try step.cacheHit(&man)) {
+ const digest = man.final();
+ self.output_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, self.include_path,
+ });
+ return;
+ }
- const output_dir = try self.builder.cache_root.join(gpa, &.{ "o", &hash_basename });
+ const digest = man.final();
// If output_path has directory parts, deal with them. Example:
// output_dir is zig-cache/o/HASH
// output_path is libavutil/avconfig.h
// We want to open directory zig-cache/o/HASH/libavutil/
// but keep output_dir as zig-cache/o/HASH for -I include
- const sub_dir_path = if (std.fs.path.dirname(self.include_path)) |d|
- try std.fs.path.join(gpa, &.{ output_dir, d })
- else
- output_dir;
+ const sub_path = try std.fs.path.join(arena, &.{ "o", &digest, self.include_path });
+ const sub_path_dirname = std.fs.path.dirname(sub_path).?;
- var dir = std.fs.cwd().makeOpenPath(sub_dir_path, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
- return err;
+ b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_path_dirname, @errorName(err),
+ });
};
- defer dir.close();
- try dir.writeFile(std.fs.path.basename(self.include_path), output.items);
+ b.cache_root.handle.writeFile(sub_path, output.items) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(err),
+ });
+ };
- self.output_file.path = try std.fs.path.join(self.builder.allocator, &.{
- output_dir, self.include_path,
- });
+ self.output_file.path = try b.cache_root.join(arena, &.{sub_path});
+ try man.writeManifest();
}
fn render_autoconf(
+ step: *Step,
contents: []const u8,
output: *std.ArrayList(u8),
values: std.StringArrayHashMap(Value),
@@ -260,7 +262,7 @@ fn render_autoconf(
}
const name = it.rest();
const kv = values_copy.fetchSwapRemove(name) orelse {
- std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
+ try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{
src_path, line_index + 1, name,
});
any_errors = true;
@@ -270,15 +272,17 @@ fn render_autoconf(
}
for (values_copy.keys()) |name| {
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
+ try step.addError("{s}: error: config header value unused: '{s}'", .{ src_path, name });
+ any_errors = true;
}
if (any_errors) {
- return error.HeaderConfigFailed;
+ return error.MakeFailed;
}
}
fn render_cmake(
+ step: *Step,
contents: []const u8,
output: *std.ArrayList(u8),
values: std.StringArrayHashMap(Value),
@@ -304,14 +308,14 @@ fn render_cmake(
continue;
}
const name = it.next() orelse {
- std.debug.print("{s}:{d}: error: missing define name\n", .{
+ try step.addError("{s}:{d}: error: missing define name", .{
src_path, line_index + 1,
});
any_errors = true;
continue;
};
const kv = values_copy.fetchSwapRemove(name) orelse {
- std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
+ try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{
src_path, line_index + 1, name,
});
any_errors = true;
@@ -321,7 +325,8 @@ fn render_cmake(
}
for (values_copy.keys()) |name| {
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
+ try step.addError("{s}: error: config header value unused: '{s}'", .{ src_path, name });
+ any_errors = true;
}
if (any_errors) {
@@ -426,3 +431,7 @@ fn renderValueNasm(output: *std.ArrayList(u8), name: []const u8, value: Value) !
},
}
}
+
+const std = @import("../std.zig");
+const ConfigHeaderStep = @This();
+const Step = std.Build.Step;
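
The comment in make() above describes how the rendered header keeps its -I directory at the cache hash root while any directory parts of include_path are created underneath it. A minimal, self-contained Zig sketch of that path computation, using a hypothetical digest and include_path purely for illustration:

const std = @import("std");

pub fn main() !void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    // Hypothetical values; in ConfigHeaderStep these come from the cache
    // manifest digest and the user-provided include_path.
    const digest = "0123456789abcdef";
    const include_path = "libavutil/avconfig.h";

    // File to write, relative to the cache root: o/<digest>/libavutil/avconfig.h
    const sub_path = try std.fs.path.join(arena, &.{ "o", digest, include_path });
    // Directory that must be created first: o/<digest>/libavutil
    const sub_path_dirname = std.fs.path.dirname(sub_path).?;
    // The -I include directory stays at o/<digest>.
    std.debug.print("write: {s}\nmkdir: {s}\n", .{ sub_path, sub_path_dirname });
}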
diff --git a/lib/std/Build/EmulatableRunStep.zig b/lib/std/Build/EmulatableRunStep.zig
deleted file mode 100644
index d4b5238524..0000000000
--- a/lib/std/Build/EmulatableRunStep.zig
+++ /dev/null
@@ -1,213 +0,0 @@
-//! Unlike `RunStep` this step will provide emulation, when enabled, to run foreign binaries.
-//! When a binary is foreign, but emulation for the target is disabled, the specified binary
-//! will not be run and therefore also not validated against its output.
-//! This step can be useful when wishing to run a built binary on multiple platforms,
-//! without having to verify if it's possible to be ran against.
-
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const CompileStep = std.Build.CompileStep;
-const RunStep = std.Build.RunStep;
-
-const fs = std.fs;
-const process = std.process;
-const EnvMap = process.EnvMap;
-
-const EmulatableRunStep = @This();
-
-pub const base_id = .emulatable_run;
-
-const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
-
-step: Step,
-builder: *std.Build,
-
-/// The artifact (executable) to be run by this step
-exe: *CompileStep,
-
-/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
-expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
-
-/// Override this field to modify the environment
-env_map: ?*EnvMap,
-
-/// Set this to modify the current working directory
-cwd: ?[]const u8,
-
-stdout_action: RunStep.StdIoAction = .inherit,
-stderr_action: RunStep.StdIoAction = .inherit,
-
-/// When set to true, hides the warning of skipping a foreign binary which cannot be run on the host
-/// or through emulation.
-hide_foreign_binaries_warning: bool,
-
-/// Creates a step that will execute the given artifact. This step will allow running the
-/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
-/// When set to false, and the binary is foreign, running the executable is skipped.
-/// Asserts given artifact is an executable.
-pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
- std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
- const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
-
- const option_name = "hide-foreign-warnings";
- const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
- break :warn builder.option(bool, option_name, "Hide the warning when a foreign binary which is incompatible is skipped") orelse false;
- } else false;
-
- self.* = .{
- .builder = builder,
- .step = Step.init(.emulatable_run, name, builder.allocator, make),
- .exe = artifact,
- .env_map = null,
- .cwd = null,
- .hide_foreign_binaries_warning = hide_warnings,
- };
- self.step.dependOn(&artifact.step);
-
- return self;
-}
-
-fn make(step: *Step) !void {
- const self = @fieldParentPtr(EmulatableRunStep, "step", step);
- const host_info = self.builder.host;
-
- var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
- defer argv_list.deinit();
-
- const need_cross_glibc = self.exe.target.isGnuLibC() and self.exe.is_linking_libc;
- switch (host_info.getExternalExecutor(self.exe.target_info, .{
- .qemu_fixes_dl = need_cross_glibc and self.builder.glibc_runtimes_dir != null,
- .link_libc = self.exe.is_linking_libc,
- })) {
- .native => {},
- .rosetta => if (!self.builder.enable_rosetta) return warnAboutForeignBinaries(self),
- .wine => |bin_name| if (self.builder.enable_wine) {
- try argv_list.append(bin_name);
- } else return,
- .qemu => |bin_name| if (self.builder.enable_qemu) {
- const glibc_dir_arg = if (need_cross_glibc)
- self.builder.glibc_runtimes_dir orelse return
- else
- null;
- try argv_list.append(bin_name);
- if (glibc_dir_arg) |dir| {
- // TODO look into making this a call to `linuxTriple`. This
- // needs the directory to be called "i686" rather than
- // "x86" which is why we do it manually here.
- const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
- const cpu_arch = self.exe.target.getCpuArch();
- const os_tag = self.exe.target.getOsTag();
- const abi = self.exe.target.getAbi();
- const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
- "i686"
- else
- @tagName(cpu_arch);
- const full_dir = try std.fmt.allocPrint(self.builder.allocator, fmt_str, .{
- dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
- });
-
- try argv_list.append("-L");
- try argv_list.append(full_dir);
- }
- } else return warnAboutForeignBinaries(self),
- .darling => |bin_name| if (self.builder.enable_darling) {
- try argv_list.append(bin_name);
- } else return warnAboutForeignBinaries(self),
- .wasmtime => |bin_name| if (self.builder.enable_wasmtime) {
- try argv_list.append(bin_name);
- try argv_list.append("--dir=.");
- } else return warnAboutForeignBinaries(self),
- else => return warnAboutForeignBinaries(self),
- }
-
- if (self.exe.target.isWindows()) {
- // On Windows we don't have rpaths so we have to add .dll search paths to PATH
- RunStep.addPathForDynLibsInternal(&self.step, self.builder, self.exe);
- }
-
- const executable_path = self.exe.installed_path orelse self.exe.getOutputSource().getPath(self.builder);
- try argv_list.append(executable_path);
-
- try RunStep.runCommand(
- argv_list.items,
- self.builder,
- self.expected_term,
- self.stdout_action,
- self.stderr_action,
- .Inherit,
- self.env_map,
- self.cwd,
- false,
- );
-}
-
-pub fn expectStdErrEqual(self: *EmulatableRunStep, bytes: []const u8) void {
- self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
-}
-
-pub fn expectStdOutEqual(self: *EmulatableRunStep, bytes: []const u8) void {
- self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
-}
-
-fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
- if (step.hide_foreign_binaries_warning) return;
- const builder = step.builder;
- const artifact = step.exe;
-
- const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
- const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
- const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
- const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
- switch (builder.host.getExternalExecutor(target_info, .{
- .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
- .link_libc = artifact.is_linking_libc,
- })) {
- .native => unreachable,
- .bad_dl => |foreign_dl| {
- const host_dl = builder.host.dynamic_linker.get() orelse "(none)";
- std.debug.print("the host system does not appear to be capable of executing binaries from the target because the host dynamic linker is '{s}', while the target dynamic linker is '{s}'. Consider setting the dynamic linker as '{s}'.\n", .{
- host_dl, foreign_dl, host_dl,
- });
- },
- .bad_os_or_cpu => {
- std.debug.print("the host system ({s}) does not appear to be capable of executing binaries from the target ({s}).\n", .{
- host_name, foreign_name,
- });
- },
- .darling => if (!builder.enable_darling) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling darling.\n",
- .{ host_name, foreign_name },
- );
- },
- .rosetta => if (!builder.enable_rosetta) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling rosetta.\n",
- .{ host_name, foreign_name },
- );
- },
- .wine => if (!builder.enable_wine) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling wine.\n",
- .{ host_name, foreign_name },
- );
- },
- .qemu => if (!builder.enable_qemu) {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling qemu.\n",
- .{ host_name, foreign_name },
- );
- },
- .wasmtime => {
- std.debug.print(
- "the host system ({s}) does not appear to be capable of executing binaries " ++
- "from the target ({s}). Consider enabling wasmtime.\n",
- .{ host_name, foreign_name },
- );
- },
- }
-}
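
With EmulatableRunStep removed, the same behavior (skip rather than fail when a binary is foreign and no emulator is enabled) is expressed through RunStep's check-based stdio and the skip_foreign_checks flag introduced later in this diff. A minimal build.zig sketch, assuming an already-created exe artifact:

const std = @import("std");
const RunStep = std.Build.RunStep;

fn addEmulatedRun(b: *std.Build, exe: *std.Build.CompileStep) *RunStep {
    const run = RunStep.create(b, "run under emulation");
    run.addArtifactArg(exe);
    // An explicit termination check keeps the step in check mode
    // (no side effects), so caching still applies.
    run.expectExitCode(0);
    // When the binary is foreign and cannot be executed (no -fqemu,
    // -fwasmtime, Rosetta, etc.), the step succeeds instead of failing.
    run.skip_foreign_checks = true;
    return run;
}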
diff --git a/lib/std/Build/FmtStep.zig b/lib/std/Build/FmtStep.zig
index 4a5efde2bd..2a82342336 100644
--- a/lib/std/Build/FmtStep.zig
+++ b/lib/std/Build/FmtStep.zig
@@ -1,32 +1,73 @@
-const std = @import("../std.zig");
-const Step = std.Build.Step;
-const FmtStep = @This();
+//! This step has two modes:
+//! * Modify mode: directly modify source files, formatting them in place.
+//! * Check mode: fail the step if a non-conforming file is found.
+
+step: Step,
+paths: []const []const u8,
+exclude_paths: []const []const u8,
+check: bool,
pub const base_id = .fmt;
-step: Step,
-builder: *std.Build,
-argv: [][]const u8,
-
-pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
- const self = builder.allocator.create(FmtStep) catch @panic("OOM");
- const name = "zig fmt";
- self.* = FmtStep{
- .step = Step.init(.fmt, name, builder.allocator, make),
- .builder = builder,
- .argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
- };
+pub const Options = struct {
+ paths: []const []const u8 = &.{},
+ exclude_paths: []const []const u8 = &.{},
+ /// If true, fails the build step when any non-conforming files are encountered.
+ check: bool = false,
+};
- self.argv[0] = builder.zig_exe;
- self.argv[1] = "fmt";
- for (paths, 0..) |path, i| {
- self.argv[2 + i] = builder.pathFromRoot(path);
- }
+pub fn create(owner: *std.Build, options: Options) *FmtStep {
+ const self = owner.allocator.create(FmtStep) catch @panic("OOM");
+ const name = if (options.check) "zig fmt --check" else "zig fmt";
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .paths = options.paths,
+ .exclude_paths = options.exclude_paths,
+ .check = options.check,
+ };
return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // zig fmt is fast enough that no progress is needed.
+ _ = prog_node;
+
+ // TODO: if check=false, this means we are modifying source files in place, which
+ // is an operation that could race against other operations also modifying source files
+ // in place. In this case, this step should obtain a write lock while making those
+ // modifications.
+
+ const b = step.owner;
+ const arena = b.allocator;
const self = @fieldParentPtr(FmtStep, "step", step);
- return self.builder.spawnChild(self.argv);
+ var argv: std.ArrayListUnmanaged([]const u8) = .{};
+ try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len);
+
+ argv.appendAssumeCapacity(b.zig_exe);
+ argv.appendAssumeCapacity("fmt");
+
+ if (self.check) {
+ argv.appendAssumeCapacity("--check");
+ }
+
+ for (self.paths) |p| {
+ argv.appendAssumeCapacity(b.pathFromRoot(p));
+ }
+
+ for (self.exclude_paths) |p| {
+ argv.appendAssumeCapacity("--exclude");
+ argv.appendAssumeCapacity(b.pathFromRoot(p));
+ }
+
+ return step.evalChildProcess(argv.items);
}
+
+const std = @import("../std.zig");
+const Step = std.Build.Step;
+const FmtStep = @This();
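
A minimal build.zig sketch of the two FmtStep modes described in the doc comment above; the top-level step names and paths are illustrative:

const std = @import("std");
const FmtStep = std.Build.FmtStep;

pub fn addFmtSteps(b: *std.Build) void {
    // Check mode: the step fails if any listed file is not canonically formatted.
    const fmt_check = FmtStep.create(b, .{
        .paths = &.{ "src", "build.zig" },
        .exclude_paths = &.{"src/generated.zig"},
        .check = true,
    });
    b.step("fmt-check", "Verify source formatting").dependOn(&fmt_check.step);

    // Modify mode (the default): format the listed files in place.
    const fmt = FmtStep.create(b, .{ .paths = &.{ "src", "build.zig" } });
    b.step("fmt", "Format source files").dependOn(&fmt.step);
}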
diff --git a/lib/std/Build/InstallArtifactStep.zig b/lib/std/Build/InstallArtifactStep.zig
index c419c85fdf..445f1e8ea8 100644
--- a/lib/std/Build/InstallArtifactStep.zig
+++ b/lib/std/Build/InstallArtifactStep.zig
@@ -3,83 +3,133 @@ const Step = std.Build.Step;
const CompileStep = std.Build.CompileStep;
const InstallDir = std.Build.InstallDir;
const InstallArtifactStep = @This();
+const fs = std.fs;
pub const base_id = .install_artifact;
step: Step,
-builder: *std.Build,
+dest_builder: *std.Build,
artifact: *CompileStep,
dest_dir: InstallDir,
pdb_dir: ?InstallDir,
h_dir: ?InstallDir,
+/// If non-null, adds additional path components relative to dest_dir, and
+/// overrides the basename of the CompileStep.
+dest_sub_path: ?[]const u8,
-pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
+pub fn create(owner: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
if (artifact.install_step) |s| return s;
- const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
+ const self = owner.allocator.create(InstallArtifactStep) catch @panic("OOM");
self.* = InstallArtifactStep{
- .builder = builder,
- .step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("install {s}", .{artifact.name}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .dest_builder = owner,
.artifact = artifact,
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
.obj => @panic("Cannot install a .obj build artifact."),
- .@"test" => @panic("Cannot install a .test build artifact, use .test_exe instead."),
- .exe, .test_exe => InstallDir{ .bin = {} },
+ .exe, .@"test" => InstallDir{ .bin = {} },
.lib => InstallDir{ .lib = {} },
},
.pdb_dir = if (artifact.producesPdbFile()) blk: {
- if (artifact.kind == .exe or artifact.kind == .test_exe) {
+ if (artifact.kind == .exe or artifact.kind == .@"test") {
break :blk InstallDir{ .bin = {} };
} else {
break :blk InstallDir{ .lib = {} };
}
} else null,
.h_dir = if (artifact.kind == .lib and artifact.emit_h) .header else null,
+ .dest_sub_path = null,
};
self.step.dependOn(&artifact.step);
artifact.install_step = self;
- builder.pushInstalledFile(self.dest_dir, artifact.out_filename);
+ owner.pushInstalledFile(self.dest_dir, artifact.out_filename);
if (self.artifact.isDynamicLibrary()) {
if (artifact.major_only_filename) |name| {
- builder.pushInstalledFile(.lib, name);
+ owner.pushInstalledFile(.lib, name);
}
if (artifact.name_only_filename) |name| {
- builder.pushInstalledFile(.lib, name);
+ owner.pushInstalledFile(.lib, name);
}
if (self.artifact.target.isWindows()) {
- builder.pushInstalledFile(.lib, artifact.out_lib_filename);
+ owner.pushInstalledFile(.lib, artifact.out_lib_filename);
}
}
if (self.pdb_dir) |pdb_dir| {
- builder.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
+ owner.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
}
if (self.h_dir) |h_dir| {
- builder.pushInstalledFile(h_dir, artifact.out_h_filename);
+ owner.pushInstalledFile(h_dir, artifact.out_h_filename);
}
return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const src_builder = step.owner;
const self = @fieldParentPtr(InstallArtifactStep, "step", step);
- const builder = self.builder;
+ const dest_builder = self.dest_builder;
- const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
- try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
- if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
- try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
+ const dest_sub_path = if (self.dest_sub_path) |sub_path| sub_path else self.artifact.out_filename;
+ const full_dest_path = dest_builder.getInstallPath(self.dest_dir, dest_sub_path);
+ const cwd = fs.cwd();
+
+ var all_cached = true;
+
+ {
+ const full_src_path = self.artifact.getOutputSource().getPath(src_builder);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_dest_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_dest_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
+ }
+
+ if (self.artifact.isDynamicLibrary() and
+ self.artifact.version != null and
+ self.artifact.target.wantSharedLibSymLinks())
+ {
+ try CompileStep.doAtomicSymLinks(step, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
}
- if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
- const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
- try builder.updateFile(self.artifact.getOutputLibSource().getPath(builder), full_implib_path);
+ if (self.artifact.isDynamicLibrary() and
+ self.artifact.target.isWindows() and
+ self.artifact.emit_implib != .no_emit)
+ {
+ const full_src_path = self.artifact.getOutputLibSource().getPath(src_builder);
+ const full_implib_path = dest_builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_implib_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_implib_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
if (self.pdb_dir) |pdb_dir| {
- const full_pdb_path = builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
- try builder.updateFile(self.artifact.getOutputPdbSource().getPath(builder), full_pdb_path);
+ const full_src_path = self.artifact.getOutputPdbSource().getPath(src_builder);
+ const full_pdb_path = dest_builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_pdb_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_pdb_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
if (self.h_dir) |h_dir| {
- const full_h_path = builder.getInstallPath(h_dir, self.artifact.out_h_filename);
- try builder.updateFile(self.artifact.getOutputHSource().getPath(builder), full_h_path);
+ const full_src_path = self.artifact.getOutputHSource().getPath(src_builder);
+ const full_h_path = dest_builder.getInstallPath(h_dir, self.artifact.out_h_filename);
+ const p = fs.Dir.updateFile(cwd, full_src_path, cwd, full_h_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_h_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and p == .fresh;
}
self.artifact.installed_path = full_dest_path;
+ step.result_cached = all_cached;
}
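
A minimal sketch of the new dest_sub_path field, assuming an existing exe artifact; the sub-path is resolved relative to dest_dir (here .bin, since the artifact is an executable) and replaces the artifact's default basename:

const std = @import("std");
const InstallArtifactStep = std.Build.InstallArtifactStep;

fn installRenamed(b: *std.Build, exe: *std.Build.CompileStep) void {
    const install = InstallArtifactStep.create(b, exe);
    // Installs to <prefix>/bin/tools/mytool instead of <prefix>/bin/<out_filename>.
    install.dest_sub_path = "tools/mytool";
    b.getInstallStep().dependOn(&install.step);
}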
diff --git a/lib/std/Build/InstallDirStep.zig b/lib/std/Build/InstallDirStep.zig
index 41dbb3e35a..d9ea248913 100644
--- a/lib/std/Build/InstallDirStep.zig
+++ b/lib/std/Build/InstallDirStep.zig
@@ -4,14 +4,12 @@ const fs = std.fs;
const Step = std.Build.Step;
const InstallDir = std.Build.InstallDir;
const InstallDirStep = @This();
-const log = std.log;
step: Step,
-builder: *std.Build,
options: Options,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*std.Build = null,
+dest_builder: *std.Build,
pub const base_id = .install_dir;
@@ -40,31 +38,35 @@ pub const Options = struct {
}
};
-pub fn init(
- builder: *std.Build,
- options: Options,
-) InstallDirStep {
- builder.pushInstalledFile(options.install_dir, options.install_subdir);
- return InstallDirStep{
- .builder = builder,
- .step = Step.init(.install_dir, builder.fmt("install {s}/", .{options.source_dir}), builder.allocator, make),
- .options = options.dupe(builder),
+pub fn init(owner: *std.Build, options: Options) InstallDirStep {
+ owner.pushInstalledFile(options.install_dir, options.install_subdir);
+ return .{
+ .step = Step.init(.{
+ .id = .install_dir,
+ .name = owner.fmt("install {s}/", .{options.source_dir}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .options = options.dupe(owner),
+ .dest_builder = owner,
};
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
const self = @fieldParentPtr(InstallDirStep, "step", step);
- const dest_prefix = self.builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
- const src_builder = self.override_source_builder orelse self.builder;
- const full_src_dir = src_builder.pathFromRoot(self.options.source_dir);
- var src_dir = std.fs.cwd().openIterableDir(full_src_dir, .{}) catch |err| {
- log.err("InstallDirStep: unable to open source directory '{s}': {s}", .{
- full_src_dir, @errorName(err),
+ const dest_builder = self.dest_builder;
+ const arena = dest_builder.allocator;
+ const dest_prefix = dest_builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
+ const src_builder = self.step.owner;
+ var src_dir = src_builder.build_root.handle.openIterableDir(self.options.source_dir, .{}) catch |err| {
+ return step.fail("unable to open source directory '{}{s}': {s}", .{
+ src_builder.build_root, self.options.source_dir, @errorName(err),
});
- return error.StepFailed;
};
defer src_dir.close();
- var it = try src_dir.walk(self.builder.allocator);
+ var it = try src_dir.walk(arena);
+ var all_cached = true;
next_entry: while (try it.next()) |entry| {
for (self.options.exclude_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
@@ -72,22 +74,37 @@ fn make(step: *Step) !void {
}
}
- const full_path = self.builder.pathJoin(&.{ full_src_dir, entry.path });
- const dest_path = self.builder.pathJoin(&.{ dest_prefix, entry.path });
+ // relative to src build root
+ const src_sub_path = try fs.path.join(arena, &.{ self.options.source_dir, entry.path });
+ const dest_path = try fs.path.join(arena, &.{ dest_prefix, entry.path });
+ const cwd = fs.cwd();
switch (entry.kind) {
- .Directory => try fs.cwd().makePath(dest_path),
+ .Directory => try cwd.makePath(dest_path),
.File => {
for (self.options.blank_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
- try self.builder.truncateFile(dest_path);
+ try dest_builder.truncateFile(dest_path);
continue :next_entry;
}
}
- try self.builder.updateFile(full_path, dest_path);
+ const prev_status = fs.Dir.updateFile(
+ src_builder.build_root.handle,
+ src_sub_path,
+ cwd,
+ dest_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{}{s}' to '{s}': {s}", .{
+ src_builder.build_root, src_sub_path, dest_path, @errorName(err),
+ });
+ };
+ all_cached = all_cached and prev_status == .fresh;
},
else => continue,
}
}
+
+ step.result_cached = all_cached;
}
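
A minimal sketch of installing a directory tree with the options used in make() above; because init() returns the step by value, it is placed in allocator-owned memory so the step pointer stays stable. The paths and extensions are illustrative:

const std = @import("std");
const InstallDirStep = std.Build.InstallDirStep;

fn installDocs(b: *std.Build) void {
    const install_dir = b.allocator.create(InstallDirStep) catch @panic("OOM");
    install_dir.* = InstallDirStep.init(b, .{
        .source_dir = "docs",
        .install_dir = .prefix,
        .install_subdir = "share/doc/myproject",
        // Files ending in .bak are skipped entirely; files ending in
        // .placeholder are installed as empty (truncated) files.
        .exclude_extensions = &.{".bak"},
        .blank_extensions = &.{".placeholder"},
    });
    b.getInstallStep().dependOn(&install_dir.step);
}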
diff --git a/lib/std/Build/InstallFileStep.zig b/lib/std/Build/InstallFileStep.zig
index 8c8d8ad2d4..011ad48208 100644
--- a/lib/std/Build/InstallFileStep.zig
+++ b/lib/std/Build/InstallFileStep.zig
@@ -3,38 +3,55 @@ const Step = std.Build.Step;
const FileSource = std.Build.FileSource;
const InstallDir = std.Build.InstallDir;
const InstallFileStep = @This();
+const assert = std.debug.assert;
pub const base_id = .install_file;
step: Step,
-builder: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*std.Build = null,
+dest_builder: *std.Build,
-pub fn init(
- builder: *std.Build,
+pub fn create(
+ owner: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
-) InstallFileStep {
- builder.pushInstalledFile(dir, dest_rel_path);
- return InstallFileStep{
- .builder = builder,
- .step = Step.init(.install_file, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make),
- .source = source.dupe(builder),
- .dir = dir.dupe(builder),
- .dest_rel_path = builder.dupePath(dest_rel_path),
+) *InstallFileStep {
+ assert(dest_rel_path.len != 0);
+ owner.pushInstalledFile(dir, dest_rel_path);
+ const self = owner.allocator.create(InstallFileStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .source = source.dupe(owner),
+ .dir = dir.dupe(owner),
+ .dest_rel_path = owner.dupePath(dest_rel_path),
+ .dest_builder = owner,
};
+ source.addStepDependencies(&self.step);
+ return self;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const src_builder = step.owner;
const self = @fieldParentPtr(InstallFileStep, "step", step);
- const src_builder = self.override_source_builder orelse self.builder;
- const full_src_path = self.source.getPath(src_builder);
- const full_dest_path = self.builder.getInstallPath(self.dir, self.dest_rel_path);
- try self.builder.updateFile(full_src_path, full_dest_path);
+ const dest_builder = self.dest_builder;
+ const full_src_path = self.source.getPath2(src_builder, step);
+ const full_dest_path = dest_builder.getInstallPath(self.dir, self.dest_rel_path);
+ const cwd = std.fs.cwd();
+ const prev = std.fs.Dir.updateFile(cwd, full_src_path, cwd, full_dest_path, .{}) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
+ full_src_path, full_dest_path, @errorName(err),
+ });
+ };
+ step.result_cached = prev == .fresh;
}
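
A minimal sketch of the new create() signature, assuming a FileSource produced by some other step (for example a generated header):

const std = @import("std");
const InstallFileStep = std.Build.InstallFileStep;

fn installGeneratedHeader(b: *std.Build, header: std.Build.FileSource) void {
    // create() asserts dest_rel_path is non-empty and wires up the step
    // dependencies implied by the FileSource.
    const install = InstallFileStep.create(b, header, .header, "myproject/config.h");
    b.getInstallStep().dependOn(&install.step);
}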
diff --git a/lib/std/Build/LogStep.zig b/lib/std/Build/LogStep.zig
deleted file mode 100644
index 6d51df8cbd..0000000000
--- a/lib/std/Build/LogStep.zig
+++ /dev/null
@@ -1,23 +0,0 @@
-const std = @import("../std.zig");
-const log = std.log;
-const Step = std.Build.Step;
-const LogStep = @This();
-
-pub const base_id = .log;
-
-step: Step,
-builder: *std.Build,
-data: []const u8,
-
-pub fn init(builder: *std.Build, data: []const u8) LogStep {
- return LogStep{
- .builder = builder,
- .step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make),
- .data = builder.dupe(data),
- };
-}
-
-fn make(step: *Step) anyerror!void {
- const self = @fieldParentPtr(LogStep, "step", step);
- log.info("{s}", .{self.data});
-}
diff --git a/lib/std/Build/ObjCopyStep.zig b/lib/std/Build/ObjCopyStep.zig
index aea5b8975c..608c56591f 100644
--- a/lib/std/Build/ObjCopyStep.zig
+++ b/lib/std/Build/ObjCopyStep.zig
@@ -21,7 +21,6 @@ pub const RawFormat = enum {
};
step: Step,
-builder: *std.Build,
file_source: std.Build.FileSource,
basename: []const u8,
output_file: std.Build.GeneratedFile,
@@ -38,19 +37,18 @@ pub const Options = struct {
};
pub fn create(
- builder: *std.Build,
+ owner: *std.Build,
file_source: std.Build.FileSource,
options: Options,
) *ObjCopyStep {
- const self = builder.allocator.create(ObjCopyStep) catch @panic("OOM");
+ const self = owner.allocator.create(ObjCopyStep) catch @panic("OOM");
self.* = ObjCopyStep{
- .step = Step.init(
- base_id,
- builder.fmt("objcopy {s}", .{file_source.getDisplayName()}),
- builder.allocator,
- make,
- ),
- .builder = builder,
+ .step = Step.init(.{
+ .id = base_id,
+ .name = owner.fmt("objcopy {s}", .{file_source.getDisplayName()}),
+ .owner = owner,
+ .makeFn = make,
+ }),
.file_source = file_source,
.basename = options.basename orelse file_source.getDisplayName(),
.output_file = std.Build.GeneratedFile{ .step = &self.step },
@@ -67,9 +65,9 @@ pub fn getOutputSource(self: *const ObjCopyStep) std.Build.FileSource {
return .{ .generated = &self.output_file };
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(ObjCopyStep, "step", step);
- const b = self.builder;
var man = b.cache.obtain();
defer man.deinit();
@@ -84,7 +82,7 @@ fn make(step: *Step) !void {
man.hash.addOptional(self.pad_to);
man.hash.addOptional(self.format);
- if (man.hit() catch |err| failWithCacheError(man, err)) {
+ if (try step.cacheHit(&man)) {
// Cache hit, skip subprocess execution.
const digest = man.final();
self.output_file.path = try b.cache_root.join(b.allocator, &.{
@@ -97,8 +95,7 @@ fn make(step: *Step) !void {
const full_dest_path = try b.cache_root.join(b.allocator, &.{ "o", &digest, self.basename });
const cache_path = "o" ++ fs.path.sep_str ++ digest;
b.cache_root.handle.makePath(cache_path) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
- return err;
+ return step.fail("unable to make path {s}: {s}", .{ cache_path, @errorName(err) });
};
var argv = std.ArrayList([]const u8).init(b.allocator);
@@ -116,23 +113,10 @@ fn make(step: *Step) !void {
};
try argv.appendSlice(&.{ full_src_path, full_dest_path });
- _ = try self.builder.execFromStep(argv.items, &self.step);
+
+ try argv.append("--listen=-");
+ _ = try step.evalZigProcess(argv.items, prog_node);
self.output_file.path = full_dest_path;
try man.writeManifest();
}
-
-/// TODO consolidate this with the same function in RunStep?
-/// Also properly deal with concurrency (see open PR)
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
-}
-
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
-}
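
A minimal sketch of driving ObjCopyStep from build.zig; only the basename option is set here, because the remaining Options fields (format, pad_to, and so on) are not shown in this hunk and any values for them would be guesses:

const std = @import("std");
const ObjCopyStep = std.Build.ObjCopyStep;

fn extractRaw(b: *std.Build, exe: *std.Build.CompileStep) std.Build.FileSource {
    const objcopy = ObjCopyStep.create(b, exe.getOutputSource(), .{
        .basename = "firmware.bin",
    });
    // The resulting FileSource can feed an InstallFileStep or another RunStep.
    return objcopy.getOutputSource();
}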
diff --git a/lib/std/Build/OptionsStep.zig b/lib/std/Build/OptionsStep.zig
index e5c3e23821..a0e72e3695 100644
--- a/lib/std/Build/OptionsStep.zig
+++ b/lib/std/Build/OptionsStep.zig
@@ -12,21 +12,24 @@ pub const base_id = .options;
step: Step,
generated_file: GeneratedFile,
-builder: *std.Build,
contents: std.ArrayList(u8),
artifact_args: std.ArrayList(OptionArtifactArg),
file_source_args: std.ArrayList(OptionFileSourceArg),
-pub fn create(builder: *std.Build) *OptionsStep {
- const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
+pub fn create(owner: *std.Build) *OptionsStep {
+ const self = owner.allocator.create(OptionsStep) catch @panic("OOM");
self.* = .{
- .builder = builder,
- .step = Step.init(.options, "options", builder.allocator, make),
+ .step = Step.init(.{
+ .id = base_id,
+ .name = "options",
+ .owner = owner,
+ .makeFn = make,
+ }),
.generated_file = undefined,
- .contents = std.ArrayList(u8).init(builder.allocator),
- .artifact_args = std.ArrayList(OptionArtifactArg).init(builder.allocator),
- .file_source_args = std.ArrayList(OptionFileSourceArg).init(builder.allocator),
+ .contents = std.ArrayList(u8).init(owner.allocator),
+ .artifact_args = std.ArrayList(OptionArtifactArg).init(owner.allocator),
+ .file_source_args = std.ArrayList(OptionFileSourceArg).init(owner.allocator),
};
self.generated_file = .{ .step = &self.step };
@@ -192,7 +195,7 @@ pub fn addOptionFileSource(
) void {
self.file_source_args.append(.{
.name = name,
- .source = source.dupe(self.builder),
+ .source = source.dupe(self.step.owner),
}) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
@@ -200,12 +203,12 @@ pub fn addOptionFileSource(
/// The value is the path in the cache dir.
/// Adds a dependency automatically.
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
- self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
+ self.artifact_args.append(.{ .name = self.step.owner.dupe(name), .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
}
pub fn createModule(self: *OptionsStep) *std.Build.Module {
- return self.builder.createModule(.{
+ return self.step.owner.createModule(.{
.source_file = self.getSource(),
.dependencies = &.{},
});
@@ -215,14 +218,18 @@ pub fn getSource(self: *OptionsStep) FileSource {
return .{ .generated = &self.generated_file };
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // This step completes so quickly that no progress is necessary.
+ _ = prog_node;
+
+ const b = step.owner;
const self = @fieldParentPtr(OptionsStep, "step", step);
for (self.artifact_args.items) |item| {
self.addOption(
[]const u8,
item.name,
- self.builder.pathFromRoot(item.artifact.getOutputSource().getPath(self.builder)),
+ b.pathFromRoot(item.artifact.getOutputSource().getPath(b)),
);
}
@@ -230,39 +237,79 @@ fn make(step: *Step) !void {
self.addOption(
[]const u8,
item.name,
- item.source.getPath(self.builder),
+ item.source.getPath(b),
);
}
- var options_dir = try self.builder.cache_root.handle.makeOpenPath("options", .{});
- defer options_dir.close();
-
- const basename = self.hashContentsToFileName();
-
- try options_dir.writeFile(&basename, self.contents.items);
-
- self.generated_file.path = try self.builder.cache_root.join(self.builder.allocator, &.{
- "options", &basename,
- });
-}
-
-fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
- // TODO update to use the cache system instead of this
- // This implementation is copied from `WriteFileStep.make`
-
- var hash = std.crypto.hash.blake2.Blake2b384.init(.{});
-
- // Random bytes to make OptionsStep unique. Refresh this with
- // new random bytes when OptionsStep implementation is modified
- // in a non-backwards-compatible way.
- hash.update("yL0Ya4KkmcCjBlP8");
- hash.update(self.contents.items);
-
- var digest: [48]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [64]u8 = undefined;
- _ = fs.base64_encoder.encode(&hash_basename, &digest);
- return hash_basename;
+ const basename = "options.zig";
+
+ // Hash contents to file name.
+ var hash = b.cache.hash;
+ // Random bytes to make unique. Refresh this with new random bytes when
+ // implementation is modified in a non-backwards-compatible way.
+ hash.add(@as(u32, 0x38845ef8));
+ hash.addBytes(self.contents.items);
+ const sub_path = "c" ++ fs.path.sep_str ++ hash.final() ++ fs.path.sep_str ++ basename;
+
+ self.generated_file.path = try b.cache_root.join(b.allocator, &.{sub_path});
+
+ // Optimize for the hot path. Stat the file, and if it already exists,
+ // cache hit.
+ if (b.cache_root.handle.access(sub_path, .{})) |_| {
+ // This is the hot path, success.
+ step.result_cached = true;
+ return;
+ } else |outer_err| switch (outer_err) {
+ error.FileNotFound => {
+ const sub_dirname = fs.path.dirname(sub_path).?;
+ b.cache_root.handle.makePath(sub_dirname) catch |e| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_dirname, @errorName(e),
+ });
+ };
+
+ const rand_int = std.crypto.random.int(u64);
+ const tmp_sub_path = "tmp" ++ fs.path.sep_str ++
+ std.Build.hex64(rand_int) ++ fs.path.sep_str ++
+ basename;
+ const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?;
+
+ b.cache_root.handle.makePath(tmp_sub_path_dirname) catch |err| {
+ return step.fail("unable to make temporary directory '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path_dirname, @errorName(err),
+ });
+ };
+
+ b.cache_root.handle.writeFile(tmp_sub_path, self.contents.items) catch |err| {
+ return step.fail("unable to write options to '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path, @errorName(err),
+ });
+ };
+
+ b.cache_root.handle.rename(tmp_sub_path, sub_path) catch |err| switch (err) {
+ error.PathAlreadyExists => {
+ // Other process beat us to it. Clean up the temp file.
+ b.cache_root.handle.deleteFile(tmp_sub_path) catch |e| {
+ try step.addError("warning: unable to delete temp file '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path, @errorName(e),
+ });
+ };
+ step.result_cached = true;
+ return;
+ },
+ else => {
+ return step.fail("unable to rename options from '{}{s}' to '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path,
+ b.cache_root, sub_path,
+ @errorName(err),
+ });
+ },
+ };
+ },
+ else => |e| return step.fail("unable to access options file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(e),
+ }),
+ }
}
const OptionArtifactArg = struct {
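
A minimal sketch of how an OptionsStep is typically consumed; addOption and createModule appear in this file, while CompileStep.addOptions is assumed to be available at this revision as the usual way to attach the generated module to an artifact:

const std = @import("std");
const OptionsStep = std.Build.OptionsStep;

fn addBuildOptions(b: *std.Build, exe: *std.Build.CompileStep) void {
    const options = OptionsStep.create(b);
    options.addOption(bool, "enable_tracy", false);
    options.addOption([]const u8, "version", "0.1.0");

    // Assumed helper: attaches the generated options.zig under the given
    // module name; options.createModule() can be used directly instead.
    exe.addOptions("build_options", options);
}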
diff --git a/lib/std/Build/RemoveDirStep.zig b/lib/std/Build/RemoveDirStep.zig
index f3b71dcec1..a5bf3c3256 100644
--- a/lib/std/Build/RemoveDirStep.zig
+++ b/lib/std/Build/RemoveDirStep.zig
@@ -1,5 +1,4 @@
const std = @import("../std.zig");
-const log = std.log;
const fs = std.fs;
const Step = std.Build.Step;
const RemoveDirStep = @This();
@@ -7,23 +6,37 @@ const RemoveDirStep = @This();
pub const base_id = .remove_dir;
step: Step,
-builder: *std.Build,
dir_path: []const u8,
-pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
+pub fn init(owner: *std.Build, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
- .builder = builder,
- .step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make),
- .dir_path = builder.dupePath(dir_path),
+ .step = Step.init(.{
+ .id = .remove_dir,
+ .name = owner.fmt("RemoveDir {s}", .{dir_path}),
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .dir_path = owner.dupePath(dir_path),
};
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ // TODO update progress node while walking file system.
+ // Should the standard library support this use case??
+ _ = prog_node;
+
+ const b = step.owner;
const self = @fieldParentPtr(RemoveDirStep, "step", step);
- const full_path = self.builder.pathFromRoot(self.dir_path);
- fs.cwd().deleteTree(full_path) catch |err| {
- log.err("Unable to remove {s}: {s}", .{ full_path, @errorName(err) });
- return err;
+ b.build_root.handle.deleteTree(self.dir_path) catch |err| {
+ if (b.build_root.path) |base| {
+ return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{
+ base, self.dir_path, @errorName(err),
+ });
+ } else {
+ return step.fail("unable to recursively delete path '{s}': {s}", .{
+ self.dir_path, @errorName(err),
+ });
+ }
};
}
diff --git a/lib/std/Build/RunStep.zig b/lib/std/Build/RunStep.zig
index 1aae37d2f3..36b409d907 100644
--- a/lib/std/Build/RunStep.zig
+++ b/lib/std/Build/RunStep.zig
@@ -10,76 +10,136 @@ const ArrayList = std.ArrayList;
const EnvMap = process.EnvMap;
const Allocator = mem.Allocator;
const ExecError = std.Build.ExecError;
-
-const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
+const assert = std.debug.assert;
const RunStep = @This();
pub const base_id: Step.Id = .run;
step: Step,
-builder: *std.Build,
 /// See also addArg and addArgs to modify this directly
argv: ArrayList(Arg),
/// Set this to modify the current working directory
+/// TODO change this to a Build.Cache.Directory to better integrate with
+/// future child process cwd API.
cwd: ?[]const u8,
/// Override this field to modify the environment, or use setEnvironmentVariable
env_map: ?*EnvMap,
-stdout_action: StdIoAction = .inherit,
-stderr_action: StdIoAction = .inherit,
-
-stdin_behavior: std.ChildProcess.StdIo = .Inherit,
-
-/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
-expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
-
-/// Print the command before running it
-print: bool,
-/// Controls whether execution is skipped if the output file is up-to-date.
-/// The default is to always run if there is no output file, and to skip
-/// running if all output files are up-to-date.
-condition: enum { output_outdated, always } = .output_outdated,
+/// Configures whether the RunStep is considered to have side-effects, and also
+/// whether the RunStep will inherit stdio streams, forwarding them to the
+/// parent process, in which case will require a global lock to prevent other
+/// steps from interfering with stdio while the subprocess associated with this
+/// RunStep is running.
+/// If the RunStep is determined to not have side-effects, then execution will
+/// be skipped if all output files are up-to-date and input files are
+/// unchanged.
+stdio: StdIo = .infer_from_args,
+/// This field must be `null` if stdio is `inherit`.
+stdin: ?[]const u8 = null,
/// Additional file paths relative to build.zig that, when modified, indicate
/// that the RunStep should be re-executed.
+/// If the RunStep is determined to have side-effects, this field is ignored
+/// and the RunStep is always executed when it appears in the build graph.
extra_file_dependencies: []const []const u8 = &.{},
-pub const StdIoAction = union(enum) {
+/// After adding an output argument, this step will by default rename itself
+/// for a better display name in the build summary.
+/// This can be disabled by setting this to false.
+rename_step_with_output_arg: bool = true,
+
+/// If this is true, a RunStep which is configured to check the output of the
+/// executed binary will not fail the build if the binary cannot be executed
+/// due to being a foreign binary to the host system which is running the
+/// build graph.
+/// Command-line arguments such as -fqemu and -fwasmtime may affect whether a
+/// binary is detected as foreign, as well as system configuration such as
+/// Rosetta (macOS) and binfmt_misc (Linux).
+/// If this RunStep is considered to have side-effects, then this flag does
+/// nothing.
+skip_foreign_checks: bool = false,
+
+/// If stderr or stdout exceeds this amount, the child process is killed and
+/// the step fails.
+max_stdio_size: usize = 10 * 1024 * 1024,
+
+captured_stdout: ?*Output = null,
+captured_stderr: ?*Output = null,
+
+has_side_effects: bool = false,
+
+pub const StdIo = union(enum) {
+ /// Whether the RunStep has side-effects will be determined by whether or not one
+ /// of the args is an output file (added with `addOutputFileArg`).
+ /// If the RunStep is determined to have side-effects, this is the same as `inherit`.
+ /// The step will fail if the subprocess crashes or returns a non-zero exit code.
+ infer_from_args,
+ /// Causes the RunStep to be considered to have side-effects, and therefore
+ /// always execute when it appears in the build graph.
+ /// It also means that this step will obtain a global lock to prevent other
+ /// steps from running in the meantime.
+ /// The step will fail if the subprocess crashes or returns a non-zero exit code.
inherit,
- ignore,
- expect_exact: []const u8,
- expect_matches: []const []const u8,
+ /// Causes the RunStep to be considered to *not* have side-effects. The
+ /// process will be re-executed if any of the input dependencies are
+ /// modified. The exit code and standard I/O streams will be checked for
+ /// certain conditions, and the step will succeed or fail based on these
+ /// conditions.
+ /// Note that an explicit check for exit code 0 needs to be added to this
+    /// list if such a check is desirable.
+ check: std.ArrayList(Check),
+ /// This RunStep is running a zig unit test binary and will communicate
+ /// extra metadata over the IPC protocol.
+ zig_test,
+
+ pub const Check = union(enum) {
+ expect_stderr_exact: []const u8,
+ expect_stderr_match: []const u8,
+ expect_stdout_exact: []const u8,
+ expect_stdout_match: []const u8,
+ expect_term: std.process.Child.Term,
+ };
};
pub const Arg = union(enum) {
artifact: *CompileStep,
file_source: std.Build.FileSource,
+ directory_source: std.Build.FileSource,
bytes: []u8,
- output: Output,
+ output: *Output,
+};
- pub const Output = struct {
- generated_file: *std.Build.GeneratedFile,
- basename: []const u8,
- };
+pub const Output = struct {
+ generated_file: std.Build.GeneratedFile,
+ prefix: []const u8,
+ basename: []const u8,
};
-pub fn create(builder: *std.Build, name: []const u8) *RunStep {
- const self = builder.allocator.create(RunStep) catch @panic("OOM");
- self.* = RunStep{
- .builder = builder,
- .step = Step.init(base_id, name, builder.allocator, make),
- .argv = ArrayList(Arg).init(builder.allocator),
+pub fn create(owner: *std.Build, name: []const u8) *RunStep {
+ const self = owner.allocator.create(RunStep) catch @panic("OOM");
+ self.* = .{
+ .step = Step.init(.{
+ .id = base_id,
+ .name = name,
+ .owner = owner,
+ .makeFn = make,
+ }),
+ .argv = ArrayList(Arg).init(owner.allocator),
.cwd = null,
.env_map = null,
- .print = builder.verbose,
};
return self;
}
+pub fn setName(self: *RunStep, name: []const u8) void {
+ self.step.name = name;
+ self.rename_step_with_output_arg = false;
+}
+
pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
@@ -89,25 +149,47 @@ pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
/// run, and returns a FileSource which can be used as inputs to other APIs
/// throughout the build system.
pub fn addOutputFileArg(rs: *RunStep, basename: []const u8) std.Build.FileSource {
- const generated_file = rs.builder.allocator.create(std.Build.GeneratedFile) catch @panic("OOM");
- generated_file.* = .{ .step = &rs.step };
- rs.argv.append(.{ .output = .{
- .generated_file = generated_file,
- .basename = rs.builder.dupe(basename),
- } }) catch @panic("OOM");
+ return addPrefixedOutputFileArg(rs, "", basename);
+}
- return .{ .generated = generated_file };
+pub fn addPrefixedOutputFileArg(
+ rs: *RunStep,
+ prefix: []const u8,
+ basename: []const u8,
+) std.Build.FileSource {
+ const b = rs.step.owner;
+
+ const output = b.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = prefix,
+ .basename = basename,
+ .generated_file = .{ .step = &rs.step },
+ };
+ rs.argv.append(.{ .output = output }) catch @panic("OOM");
+
+ if (rs.rename_step_with_output_arg) {
+ rs.setName(b.fmt("{s} ({s})", .{ rs.step.name, basename }));
+ }
+
+ return .{ .generated = &output.generated_file };
}
pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
- self.argv.append(Arg{
- .file_source = file_source.dupe(self.builder),
+ self.argv.append(.{
+ .file_source = file_source.dupe(self.step.owner),
}) catch @panic("OOM");
file_source.addStepDependencies(&self.step);
}
+pub fn addDirectorySourceArg(self: *RunStep, directory_source: std.Build.FileSource) void {
+ self.argv.append(.{
+ .directory_source = directory_source.dupe(self.step.owner),
+ }) catch @panic("OOM");
+ directory_source.addStepDependencies(&self.step);
+}
+
pub fn addArg(self: *RunStep, arg: []const u8) void {
- self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
+ self.argv.append(.{ .bytes = self.step.owner.dupe(arg) }) catch @panic("OOM");
}
pub fn addArgs(self: *RunStep, args: []const []const u8) void {
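
A minimal sketch of the plain and prefixed output-argument forms added above; the generator name is hypothetical:

const std = @import("std");
const RunStep = std.Build.RunStep;

fn generateHeader(b: *std.Build) std.Build.FileSource {
    const run = RunStep.create(b, "generate header");
    // Hypothetical code generator; a built artifact added with
    // addArtifactArg would work the same way.
    run.addArg("my_codegen");
    // Plain form: the output path becomes its own argv entry.
    const header = run.addOutputFileArg("config.h");
    // Prefixed form: for tools that expect a single "--out=<path>" argument.
    _ = run.addPrefixedOutputFileArg("--out=", "extra.json");
    return header;
}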
@@ -117,102 +199,183 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
}
pub fn clearEnvironment(self: *RunStep) void {
- const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
- new_env_map.* = EnvMap.init(self.builder.allocator);
+ const b = self.step.owner;
+ const new_env_map = b.allocator.create(EnvMap) catch @panic("OOM");
+ new_env_map.* = EnvMap.init(b.allocator);
self.env_map = new_env_map;
}
pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
- addPathDirInternal(&self.step, self.builder, search_path);
-}
-
-/// For internal use only, users of `RunStep` should use `addPathDir` directly.
-pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const u8) void {
- const env_map = getEnvMapInternal(step, builder.allocator);
+ const b = self.step.owner;
+ const env_map = getEnvMapInternal(self);
const key = "PATH";
var prev_path = env_map.get(key);
if (prev_path) |pp| {
- const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
+ const new_path = b.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
env_map.put(key, new_path) catch @panic("OOM");
} else {
- env_map.put(key, builder.dupePath(search_path)) catch @panic("OOM");
+ env_map.put(key, b.dupePath(search_path)) catch @panic("OOM");
}
}
pub fn getEnvMap(self: *RunStep) *EnvMap {
- return getEnvMapInternal(&self.step, self.builder.allocator);
+ return getEnvMapInternal(self);
}
-fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
- const maybe_env_map = switch (step.id) {
- .run => step.cast(RunStep).?.env_map,
- .emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
- else => unreachable,
- };
- return maybe_env_map orelse {
- const env_map = allocator.create(EnvMap) catch @panic("OOM");
- env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
- switch (step.id) {
- .run => step.cast(RunStep).?.env_map = env_map,
- .emulatable_run => step.cast(RunStep).?.env_map = env_map,
- else => unreachable,
- }
+fn getEnvMapInternal(self: *RunStep) *EnvMap {
+ const arena = self.step.owner.allocator;
+ return self.env_map orelse {
+ const env_map = arena.create(EnvMap) catch @panic("OOM");
+ env_map.* = process.getEnvMap(arena) catch @panic("unhandled error");
+ self.env_map = env_map;
return env_map;
};
}
pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8) void {
+ const b = self.step.owner;
const env_map = self.getEnvMap();
- env_map.put(
- self.builder.dupe(key),
- self.builder.dupe(value),
- ) catch @panic("unhandled error");
+ env_map.put(b.dupe(key), b.dupe(value)) catch @panic("unhandled error");
+}
+
+pub fn removeEnvironmentVariable(self: *RunStep, key: []const u8) void {
+ self.getEnvMap().remove(key);
}
+/// Adds a check for exact stderr match. Does not add any other checks.
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
- self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
+ const new_check: StdIo.Check = .{ .expect_stderr_exact = self.step.owner.dupe(bytes) };
+ self.addCheck(new_check);
}
+/// Adds a check for exact stdout match as well as a check for exit code 0, if
+/// there is not already an expected termination check.
pub fn expectStdOutEqual(self: *RunStep, bytes: []const u8) void {
- self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
+ const new_check: StdIo.Check = .{ .expect_stdout_exact = self.step.owner.dupe(bytes) };
+ self.addCheck(new_check);
+ if (!self.hasTermCheck()) {
+ self.expectExitCode(0);
+ }
}
-fn stdIoActionToBehavior(action: StdIoAction) std.ChildProcess.StdIo {
- return switch (action) {
- .ignore => .Ignore,
- .inherit => .Inherit,
- .expect_exact, .expect_matches => .Pipe,
+pub fn expectExitCode(self: *RunStep, code: u8) void {
+ const new_check: StdIo.Check = .{ .expect_term = .{ .Exited = code } };
+ self.addCheck(new_check);
+}
+
+pub fn hasTermCheck(self: RunStep) bool {
+ for (self.stdio.check.items) |check| switch (check) {
+ .expect_term => return true,
+ else => continue,
};
+ return false;
}
-fn needOutputCheck(self: RunStep) bool {
- switch (self.condition) {
- .always => return false,
- .output_outdated => {},
+pub fn addCheck(self: *RunStep, new_check: StdIo.Check) void {
+ switch (self.stdio) {
+ .infer_from_args => {
+ self.stdio = .{ .check = std.ArrayList(StdIo.Check).init(self.step.owner.allocator) };
+ self.stdio.check.append(new_check) catch @panic("OOM");
+ },
+ .check => |*checks| checks.append(new_check) catch @panic("OOM"),
+        else => @panic("illegal call to addCheck: conflicting helper method calls. Suggest directly setting the stdio field of RunStep instead"),
}
- if (self.extra_file_dependencies.len > 0) return true;
+}
+
+pub fn captureStdErr(self: *RunStep) std.Build.FileSource {
+ assert(self.stdio != .inherit);
+
+ if (self.captured_stderr) |output| return .{ .generated = &output.generated_file };
+
+ const output = self.step.owner.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = "",
+ .basename = "stderr",
+ .generated_file = .{ .step = &self.step },
+ };
+ self.captured_stderr = output;
+ return .{ .generated = &output.generated_file };
+}
+
+pub fn captureStdOut(self: *RunStep) std.Build.FileSource {
+ assert(self.stdio != .inherit);
+ if (self.captured_stdout) |output| return .{ .generated = &output.generated_file };
+
+ const output = self.step.owner.allocator.create(Output) catch @panic("OOM");
+ output.* = .{
+ .prefix = "",
+ .basename = "stdout",
+ .generated_file = .{ .step = &self.step },
+ };
+ self.captured_stdout = output;
+ return .{ .generated = &output.generated_file };
+}
+
+/// Returns whether the RunStep has side effects *other than* updating the output arguments.
+fn hasSideEffects(self: RunStep) bool {
+ if (self.has_side_effects) return true;
+ return switch (self.stdio) {
+ .infer_from_args => !self.hasAnyOutputArgs(),
+ .inherit => true,
+ .check => false,
+ .zig_test => false,
+ };
+}
+
+fn hasAnyOutputArgs(self: RunStep) bool {
+ if (self.captured_stdout != null) return true;
+ if (self.captured_stderr != null) return true;
for (self.argv.items) |arg| switch (arg) {
.output => return true,
else => continue,
};
+ return false;
+}
+
+fn checksContainStdout(checks: []const StdIo.Check) bool {
+ for (checks) |check| switch (check) {
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ .expect_term,
+ => continue,
+
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ => return true,
+ };
+ return false;
+}
+fn checksContainStderr(checks: []const StdIo.Check) bool {
+ for (checks) |check| switch (check) {
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ .expect_term,
+ => continue,
+
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ => return true,
+ };
return false;
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
+ const arena = b.allocator;
const self = @fieldParentPtr(RunStep, "step", step);
- const need_output_check = self.needOutputCheck();
+ const has_side_effects = self.hasSideEffects();
- var argv_list = ArrayList([]const u8).init(self.builder.allocator);
+ var argv_list = ArrayList([]const u8).init(arena);
var output_placeholders = ArrayList(struct {
index: usize,
- output: Arg.Output,
- }).init(self.builder.allocator);
+ output: *Output,
+ }).init(arena);
- var man = self.builder.cache.obtain();
+ var man = b.cache.obtain();
defer man.deinit();
for (self.argv.items) |arg| {
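
A minimal sketch of a check-based RunStep using the helpers defined above; the expected output and the stderr substring are illustrative:

const std = @import("std");
const RunStep = std.Build.RunStep;

fn addCliSmokeTest(b: *std.Build, exe: *std.Build.CompileStep) *RunStep {
    const run = RunStep.create(b, "cli smoke test");
    run.addArtifactArg(exe);
    run.addArgs(&.{"--version"});

    // Switches stdio to .check and, since no termination check exists yet,
    // also adds an exit-code-0 expectation.
    run.expectStdOutEqual("1.0.0\n");
    // Further checks can be appended directly.
    run.addCheck(.{ .expect_stderr_match = "deprecated" });
    // For cross builds, skip the checks rather than fail when the binary
    // cannot be executed on the host.
    run.skip_foreign_checks = true;
    return run;
}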
@@ -222,23 +385,29 @@ fn make(step: *Step) !void {
man.hash.addBytes(bytes);
},
.file_source => |file| {
- const file_path = file.getPath(self.builder);
+ const file_path = file.getPath(b);
try argv_list.append(file_path);
_ = try man.addFile(file_path, null);
},
+ .directory_source => |file| {
+ const file_path = file.getPath(b);
+ try argv_list.append(file_path);
+ man.hash.addBytes(file_path);
+ },
.artifact => |artifact| {
if (artifact.target.isWindows()) {
// On Windows we don't have rpaths so we have to add .dll search paths to PATH
self.addPathForDynLibs(artifact);
}
const file_path = artifact.installed_path orelse
- artifact.getOutputSource().getPath(self.builder);
+ artifact.getOutputSource().getPath(b);
try argv_list.append(file_path);
_ = try man.addFile(file_path, null);
},
.output => |output| {
+ man.hash.addBytes(output.prefix);
man.hash.addBytes(output.basename);
// Add a placeholder into the argument list because we need the
// manifest hash to be updated with all arguments before the
@@ -252,60 +421,77 @@ fn make(step: *Step) !void {
}
}
- if (need_output_check) {
- for (self.extra_file_dependencies) |file_path| {
- _ = try man.addFile(self.builder.pathFromRoot(file_path), null);
- }
+ if (self.captured_stdout) |output| {
+ man.hash.addBytes(output.basename);
+ }
- if (man.hit() catch |err| failWithCacheError(man, err)) {
- // cache hit, skip running command
- const digest = man.final();
- for (output_placeholders.items) |placeholder| {
- placeholder.output.generated_file.path = try self.builder.cache_root.join(
- self.builder.allocator,
- &.{ "o", &digest, placeholder.output.basename },
- );
- }
- return;
- }
+ if (self.captured_stderr) |output| {
+ man.hash.addBytes(output.basename);
+ }
- const digest = man.final();
+ hashStdIo(&man.hash, self.stdio);
+ if (has_side_effects) {
+ try runCommand(self, argv_list.items, has_side_effects, null, prog_node);
+ return;
+ }
+
+ for (self.extra_file_dependencies) |file_path| {
+ _ = try man.addFile(b.pathFromRoot(file_path), null);
+ }
+
+ if (try step.cacheHit(&man)) {
+ // cache hit, skip running command
+ const digest = man.final();
for (output_placeholders.items) |placeholder| {
- const output_path = try self.builder.cache_root.join(
- self.builder.allocator,
- &.{ "o", &digest, placeholder.output.basename },
- );
- const output_dir = fs.path.dirname(output_path).?;
- fs.cwd().makePath(output_dir) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
- return err;
- };
+ placeholder.output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, placeholder.output.basename,
+ });
+ }
+
+ if (self.captured_stdout) |output| {
+ output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, output.basename,
+ });
+ }
- placeholder.output.generated_file.path = output_path;
- argv_list.items[placeholder.index] = output_path;
+ if (self.captured_stderr) |output| {
+ output.generated_file.path = try b.cache_root.join(arena, &.{
+ "o", &digest, output.basename,
+ });
}
+
+ step.result_cached = true;
+ return;
}
- try runCommand(
- argv_list.items,
- self.builder,
- self.expected_term,
- self.stdout_action,
- self.stderr_action,
- self.stdin_behavior,
- self.env_map,
- self.cwd,
- self.print,
- );
-
- if (need_output_check) {
- try man.writeManifest();
+ const digest = man.final();
+
+ for (output_placeholders.items) |placeholder| {
+ const output_components = .{ "o", &digest, placeholder.output.basename };
+ const output_sub_path = try fs.path.join(arena, &output_components);
+ const output_sub_dir_path = fs.path.dirname(output_sub_path).?;
+ b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, output_sub_dir_path, @errorName(err),
+ });
+ };
+ const output_path = try b.cache_root.join(arena, &output_components);
+ placeholder.output.generated_file.path = output_path;
+ const cli_arg = if (placeholder.output.prefix.len == 0)
+ output_path
+ else
+ b.fmt("{s}{s}", .{ placeholder.output.prefix, output_path });
+ argv_list.items[placeholder.index] = cli_arg;
}
+
+ try runCommand(self, argv_list.items, has_side_effects, &digest, prog_node);
+
+ try step.writeManifest(&man);
}
fn formatTerm(
- term: ?std.ChildProcess.Term,
+ term: ?std.process.Child.Term,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
@@ -321,11 +507,11 @@ fn formatTerm(
try writer.writeAll("exited with any code");
}
}
-fn fmtTerm(term: ?std.ChildProcess.Term) std.fmt.Formatter(formatTerm) {
+fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) {
return .{ .data = term };
}
-fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term) bool {
+fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term) bool {
return if (expected) |e| switch (e) {
.Exited => |expected_code| switch (actual) {
.Exited => |actual_code| expected_code == actual_code,
@@ -349,183 +535,702 @@ fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term)
};
}
-pub fn runCommand(
+fn runCommand(
+ self: *RunStep,
argv: []const []const u8,
- builder: *std.Build,
- expected_term: ?std.ChildProcess.Term,
- stdout_action: StdIoAction,
- stderr_action: StdIoAction,
- stdin_behavior: std.ChildProcess.StdIo,
- env_map: ?*EnvMap,
- maybe_cwd: ?[]const u8,
- print: bool,
+ has_side_effects: bool,
+ digest: ?*const [std.Build.Cache.hex_digest_len]u8,
+ prog_node: *std.Progress.Node,
) !void {
- const cwd = if (maybe_cwd) |cwd| builder.pathFromRoot(cwd) else builder.build_root.path;
-
- if (!std.process.can_spawn) {
- const cmd = try std.mem.join(builder.allocator, " ", argv);
- std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{
- @tagName(builtin.os.tag), cmd,
- });
- builder.allocator.free(cmd);
- return ExecError.ExecNotSupported;
- }
+ const step = &self.step;
+ const b = step.owner;
+ const arena = b.allocator;
+
+ try step.handleChildProcUnsupported(self.cwd, argv);
+ try Step.handleVerbose2(step.owner, self.cwd, self.env_map, argv);
+
+ const allow_skip = switch (self.stdio) {
+ .check, .zig_test => self.skip_foreign_checks,
+ else => false,
+ };
+
+ var interp_argv = std.ArrayList([]const u8).init(b.allocator);
+ defer interp_argv.deinit();
+
+ const result = spawnChildAndCollect(self, argv, has_side_effects, prog_node) catch |err| term: {
+ // InvalidExe: cpu arch mismatch
+ // FileNotFound: can happen with a wrong dynamic linker path
+ if (err == error.InvalidExe or err == error.FileNotFound) interpret: {
+            // TODO: learn the target from the binary directly rather than
+            // relying on it being a CompileStep. This will make this logic
+ // work even for the edge case that the binary was produced by a
+ // third party.
+ const exe = switch (self.argv.items[0]) {
+ .artifact => |exe| exe,
+ else => break :interpret,
+ };
+ switch (exe.kind) {
+ .exe, .@"test" => {},
+ else => break :interpret,
+ }
+
+ const need_cross_glibc = exe.target.isGnuLibC() and exe.is_linking_libc;
+ switch (b.host.getExternalExecutor(exe.target_info, .{
+ .qemu_fixes_dl = need_cross_glibc and b.glibc_runtimes_dir != null,
+ .link_libc = exe.is_linking_libc,
+ })) {
+ .native, .rosetta => {
+ if (allow_skip) return error.MakeSkipped;
+ break :interpret;
+ },
+ .wine => |bin_name| {
+ if (b.enable_wine) {
+ try interp_argv.append(bin_name);
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fwine", argv[0], exe);
+ }
+ },
+ .qemu => |bin_name| {
+ if (b.enable_qemu) {
+ const glibc_dir_arg = if (need_cross_glibc)
+ b.glibc_runtimes_dir orelse
+ return failForeign(self, "--glibc-runtimes", argv[0], exe)
+ else
+ null;
+
+ try interp_argv.append(bin_name);
+
+ if (glibc_dir_arg) |dir| {
+ // TODO look into making this a call to `linuxTriple`. This
+ // needs the directory to be called "i686" rather than
+ // "x86" which is why we do it manually here.
+ const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
+ const cpu_arch = exe.target.getCpuArch();
+ const os_tag = exe.target.getOsTag();
+ const abi = exe.target.getAbi();
+ const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
+ "i686"
+ else
+ @tagName(cpu_arch);
+ const full_dir = try std.fmt.allocPrint(b.allocator, fmt_str, .{
+ dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
+ });
+
+ try interp_argv.append("-L");
+ try interp_argv.append(full_dir);
+ }
+
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fqemu", argv[0], exe);
+ }
+ },
+ .darling => |bin_name| {
+ if (b.enable_darling) {
+ try interp_argv.append(bin_name);
+ try interp_argv.appendSlice(argv);
+ } else {
+ return failForeign(self, "-fdarling", argv[0], exe);
+ }
+ },
+ .wasmtime => |bin_name| {
+ if (b.enable_wasmtime) {
+ try interp_argv.append(bin_name);
+ try interp_argv.append("--dir=.");
+ try interp_argv.append(argv[0]);
+ try interp_argv.append("--");
+ try interp_argv.appendSlice(argv[1..]);
+ } else {
+ return failForeign(self, "-fwasmtime", argv[0], exe);
+ }
+ },
+ .bad_dl => |foreign_dl| {
+ if (allow_skip) return error.MakeSkipped;
+
+ const host_dl = b.host.dynamic_linker.get() orelse "(none)";
+
+ return step.fail(
+ \\the host system is unable to execute binaries from the target
+ \\ because the host dynamic linker is '{s}',
+ \\ while the target dynamic linker is '{s}'.
+ \\ consider setting the dynamic linker or enabling skip_foreign_checks in the Run step
+ , .{ host_dl, foreign_dl });
+ },
+ .bad_os_or_cpu => {
+ if (allow_skip) return error.MakeSkipped;
+
+ const host_name = try b.host.target.zigTriple(b.allocator);
+ const foreign_name = try exe.target.zigTriple(b.allocator);
+
+ return step.fail("the host system ({s}) is unable to execute binaries from the target ({s})", .{
+ host_name, foreign_name,
+ });
+ },
+ }
- var child = std.ChildProcess.init(argv, builder.allocator);
- child.cwd = cwd;
- child.env_map = env_map orelse builder.env_map;
+ if (exe.target.isWindows()) {
+ // On Windows we don't have rpaths so we have to add .dll search paths to PATH
+ self.addPathForDynLibs(exe);
+ }
- child.stdin_behavior = stdin_behavior;
- child.stdout_behavior = stdIoActionToBehavior(stdout_action);
- child.stderr_behavior = stdIoActionToBehavior(stderr_action);
+ try Step.handleVerbose2(step.owner, self.cwd, self.env_map, interp_argv.items);
- if (print)
- printCmd(cwd, argv);
+ break :term spawnChildAndCollect(self, interp_argv.items, has_side_effects, prog_node) catch |e| {
+ return step.fail("unable to spawn interpreter {s}: {s}", .{
+ interp_argv.items[0], @errorName(e),
+ });
+ };
+ }
- child.spawn() catch |err| {
- std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
- return err;
+ return step.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
};
- // TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
+ step.result_duration_ns = result.elapsed_ns;
+ step.result_peak_rss = result.peak_rss;
+ step.test_results = result.stdio.test_results;
- var stdout: ?[]const u8 = null;
- defer if (stdout) |s| builder.allocator.free(s);
+ // Capture stdout and stderr to GeneratedFile objects.
+ const Stream = struct {
+ captured: ?*Output,
+ is_null: bool,
+ bytes: []const u8,
+ };
+ for ([_]Stream{
+ .{
+ .captured = self.captured_stdout,
+ .is_null = result.stdio.stdout_null,
+ .bytes = result.stdio.stdout,
+ },
+ .{
+ .captured = self.captured_stderr,
+ .is_null = result.stdio.stderr_null,
+ .bytes = result.stdio.stderr,
+ },
+ }) |stream| {
+ if (stream.captured) |output| {
+ assert(!stream.is_null);
+
+ const output_components = .{ "o", digest.?, output.basename };
+ const output_path = try b.cache_root.join(arena, &output_components);
+ output.generated_file.path = output_path;
+
+ const sub_path = try fs.path.join(arena, &output_components);
+ const sub_path_dirname = fs.path.dirname(sub_path).?;
+ b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_path_dirname, @errorName(err),
+ });
+ };
+ b.cache_root.handle.writeFile(sub_path, stream.bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(err),
+ });
+ };
+ }
+ }
- switch (stdout_action) {
- .expect_exact, .expect_matches => {
- stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
+ const final_argv = if (interp_argv.items.len == 0) argv else interp_argv.items;
+
+ switch (self.stdio) {
+ .check => |checks| for (checks.items) |check| switch (check) {
+ .expect_stderr_exact => |expected_bytes| {
+ assert(!result.stdio.stderr_null);
+ if (!mem.eql(u8, expected_bytes, result.stdio.stderr)) {
+ return step.fail(
+ \\
+ \\========= expected this stderr: =========
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following command: ===
+ \\{s}
+ , .{
+ expected_bytes,
+ result.stdio.stderr,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stderr_match => |match| {
+ assert(!result.stdio.stderr_null);
+ if (mem.indexOf(u8, result.stdio.stderr, match) == null) {
+ return step.fail(
+ \\
+ \\========= expected to find in stderr: =========
+ \\{s}
+ \\========= but stderr does not contain it: =====
+ \\{s}
+ \\========= from the following command: =========
+ \\{s}
+ , .{
+ match,
+ result.stdio.stderr,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stdout_exact => |expected_bytes| {
+ assert(!result.stdio.stdout_null);
+ if (!mem.eql(u8, expected_bytes, result.stdio.stdout)) {
+ return step.fail(
+ \\
+ \\========= expected this stdout: =========
+ \\{s}
+ \\========= but found: ====================
+ \\{s}
+ \\========= from the following command: ===
+ \\{s}
+ , .{
+ expected_bytes,
+ result.stdio.stdout,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_stdout_match => |match| {
+ assert(!result.stdio.stdout_null);
+ if (mem.indexOf(u8, result.stdio.stdout, match) == null) {
+ return step.fail(
+ \\
+ \\========= expected to find in stdout: =========
+ \\{s}
+ \\========= but stdout does not contain it: =====
+ \\{s}
+ \\========= from the following command: =========
+ \\{s}
+ , .{
+ match,
+ result.stdio.stdout,
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ .expect_term => |expected_term| {
+ if (!termMatches(expected_term, result.term)) {
+ return step.fail("the following command {} (expected {}):\n{s}", .{
+ fmtTerm(result.term),
+ fmtTerm(expected_term),
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ },
+ },
+ .zig_test => {
+ const expected_term: std.process.Child.Term = .{ .Exited = 0 };
+ if (!termMatches(expected_term, result.term)) {
+ return step.fail("the following command {} (expected {}):\n{s}", .{
+ fmtTerm(result.term),
+ fmtTerm(expected_term),
+ try Step.allocPrintCmd(arena, self.cwd, final_argv),
+ });
+ }
+ if (!result.stdio.test_results.isSuccess()) {
+ return step.fail(
+ "the following test command failed:\n{s}",
+ .{try Step.allocPrintCmd(arena, self.cwd, final_argv)},
+ );
+ }
+ },
+ else => {
+ try step.handleChildProcessTerm(result.term, self.cwd, final_argv);
},
- .inherit, .ignore => {},
}
+}
- var stderr: ?[]const u8 = null;
- defer if (stderr) |s| builder.allocator.free(s);
+const ChildProcResult = struct {
+ term: std.process.Child.Term,
+ elapsed_ns: u64,
+ peak_rss: usize,
- switch (stderr_action) {
- .expect_exact, .expect_matches => {
- stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
- },
- .inherit, .ignore => {},
+ stdio: StdIoResult,
+};
+
+fn spawnChildAndCollect(
+ self: *RunStep,
+ argv: []const []const u8,
+ has_side_effects: bool,
+ prog_node: *std.Progress.Node,
+) !ChildProcResult {
+ const b = self.step.owner;
+ const arena = b.allocator;
+
+ var child = std.process.Child.init(argv, arena);
+ if (self.cwd) |cwd| {
+ child.cwd = b.pathFromRoot(cwd);
+ } else {
+ child.cwd = b.build_root.path;
+ child.cwd_dir = b.build_root.handle;
}
+ child.env_map = self.env_map orelse b.env_map;
+ child.request_resource_usage_statistics = true;
- const term = child.wait() catch |err| {
- std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
- return err;
+ child.stdin_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Close,
+ .inherit => .Inherit,
+ .check => .Close,
+ .zig_test => .Pipe,
};
+ child.stdout_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Ignore,
+ .inherit => .Inherit,
+ .check => |checks| if (checksContainStdout(checks.items)) .Pipe else .Ignore,
+ .zig_test => .Pipe,
+ };
+ child.stderr_behavior = switch (self.stdio) {
+ .infer_from_args => if (has_side_effects) .Inherit else .Pipe,
+ .inherit => .Inherit,
+ .check => .Pipe,
+ .zig_test => .Pipe,
+ };
+ if (self.captured_stdout != null) child.stdout_behavior = .Pipe;
+ if (self.captured_stderr != null) child.stderr_behavior = .Pipe;
+ if (self.stdin != null) {
+ assert(child.stdin_behavior != .Inherit);
+ child.stdin_behavior = .Pipe;
+ }
- if (!termMatches(expected_term, term)) {
- if (builder.prominent_compile_errors) {
- std.debug.print("Run step {} (expected {})\n", .{ fmtTerm(term), fmtTerm(expected_term) });
- } else {
- std.debug.print("The following command {} (expected {}):\n", .{ fmtTerm(term), fmtTerm(expected_term) });
- printCmd(cwd, argv);
+ try child.spawn();
+ var timer = try std.time.Timer.start();
+
+ const result = if (self.stdio == .zig_test)
+ evalZigTest(self, &child, prog_node)
+ else
+ evalGeneric(self, &child);
+
+ const term = try child.wait();
+ const elapsed_ns = timer.read();
+
+ return .{
+ .stdio = try result,
+ .term = term,
+ .elapsed_ns = elapsed_ns,
+ .peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0,
+ };
+}
+
+const StdIoResult = struct {
+ // These use boolean flags instead of optionals as a workaround for
+ // https://github.com/ziglang/zig/issues/14783
+ stdout: []const u8,
+ stderr: []const u8,
+ stdout_null: bool,
+ stderr_null: bool,
+ test_results: Step.TestResults,
+};
+
+fn evalZigTest(
+ self: *RunStep,
+ child: *std.process.Child,
+ prog_node: *std.Progress.Node,
+) !StdIoResult {
+ const gpa = self.step.owner.allocator;
+ const arena = self.step.owner.allocator;
+
+ var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ defer poller.deinit();
+
+ try sendMessage(child.stdin.?, .query_test_metadata);
+
+ const Header = std.zig.Server.Message.Header;
+
+ const stdout = poller.fifo(.stdout);
+ const stderr = poller.fifo(.stderr);
+
+ var fail_count: u32 = 0;
+ var skip_count: u32 = 0;
+ var leak_count: u32 = 0;
+ var test_count: u32 = 0;
+
+ var metadata: ?TestMetadata = null;
+
+ var sub_prog_node: ?std.Progress.Node = null;
+ defer if (sub_prog_node) |*n| n.end();
+
+ poll: while (true) {
+ while (stdout.readableLength() < @sizeOf(Header)) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const header = stdout.reader().readStruct(Header) catch unreachable;
+ while (stdout.readableLength() < header.bytes_len) {
+ if (!(try poller.poll())) break :poll;
}
- return error.UnexpectedExit;
+ const body = stdout.readableSliceOfLen(header.bytes_len);
+
+ switch (header.tag) {
+ .zig_version => {
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
+ return self.step.fail(
+ "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
+ .{ builtin.zig_version_string, body },
+ );
+ }
+ },
+ .test_metadata => {
+ const TmHdr = std.zig.Server.Message.TestMetadata;
+ const tm_hdr = @ptrCast(*align(1) const TmHdr, body);
+ test_count = tm_hdr.tests_len;
+
+ const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
+ const async_frame_lens_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
+
+ const names = std.mem.bytesAsSlice(u32, names_bytes);
+ const async_frame_lens = std.mem.bytesAsSlice(u32, async_frame_lens_bytes);
+ const expected_panic_msgs = std.mem.bytesAsSlice(u32, expected_panic_msgs_bytes);
+ const names_aligned = try arena.alloc(u32, names.len);
+ for (names_aligned, names) |*dest, src| dest.* = src;
+
+ const async_frame_lens_aligned = try arena.alloc(u32, async_frame_lens.len);
+ for (async_frame_lens_aligned, async_frame_lens) |*dest, src| dest.* = src;
+
+ const expected_panic_msgs_aligned = try arena.alloc(u32, expected_panic_msgs.len);
+ for (expected_panic_msgs_aligned, expected_panic_msgs) |*dest, src| dest.* = src;
+
+ prog_node.setEstimatedTotalItems(names.len);
+ metadata = .{
+ .string_bytes = try arena.dupe(u8, string_bytes),
+ .names = names_aligned,
+ .async_frame_lens = async_frame_lens_aligned,
+ .expected_panic_msgs = expected_panic_msgs_aligned,
+ .next_index = 0,
+ .prog_node = prog_node,
+ };
+
+ try requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node);
+ },
+ .test_results => {
+ const md = metadata.?;
+
+ const TrHdr = std.zig.Server.Message.TestResults;
+ const tr_hdr = @ptrCast(*align(1) const TrHdr, body);
+ fail_count += @boolToInt(tr_hdr.flags.fail);
+ skip_count += @boolToInt(tr_hdr.flags.skip);
+ leak_count += @boolToInt(tr_hdr.flags.leak);
+
+ if (tr_hdr.flags.fail or tr_hdr.flags.leak) {
+ const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
+ const msg = std.mem.trim(u8, stderr.readableSlice(0), "\n");
+ const label = if (tr_hdr.flags.fail) "failed" else "leaked";
+ if (msg.len > 0) {
+ try self.step.addError("'{s}' {s}: {s}", .{ name, label, msg });
+ } else {
+ try self.step.addError("'{s}' {s}", .{ name, label });
+ }
+ stderr.discard(msg.len);
+ }
+
+ try requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node);
+ },
+ else => {}, // ignore other messages
+ }
+
+ stdout.discard(body.len);
}
- switch (stderr_action) {
- .inherit, .ignore => {},
- .expect_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, stderr.?)) {
- std.debug.print(
- \\
- \\========= Expected this stderr: =========
- \\{s}
- \\========= But found: ====================
- \\{s}
- \\
- , .{ expected_bytes, stderr.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
- },
- .expect_matches => |matches| for (matches) |match| {
- if (mem.indexOf(u8, stderr.?, match) == null) {
- std.debug.print(
- \\
- \\========= Expected to find in stderr: =========
- \\{s}
- \\========= But stderr does not contain it: =====
- \\{s}
- \\
- , .{ match, stderr.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
- },
+ if (stderr.readableLength() > 0) {
+ const msg = std.mem.trim(u8, try stderr.toOwnedSlice(), "\n");
+ if (msg.len > 0) try self.step.result_error_msgs.append(arena, msg);
}
- switch (stdout_action) {
- .inherit, .ignore => {},
- .expect_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, stdout.?)) {
- std.debug.print(
- \\
- \\========= Expected this stdout: =========
- \\{s}
- \\========= But found: ====================
- \\{s}
- \\
- , .{ expected_bytes, stdout.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
- },
- .expect_matches => |matches| for (matches) |match| {
- if (mem.indexOf(u8, stdout.?, match) == null) {
- std.debug.print(
- \\
- \\========= Expected to find in stdout: =========
- \\{s}
- \\========= But stdout does not contain it: =====
- \\{s}
- \\
- , .{ match, stdout.? });
- printCmd(cwd, argv);
- return error.TestFailed;
- }
+ // Send EOF to stdin.
+ child.stdin.?.close();
+ child.stdin = null;
+
+ return .{
+ .stdout = &.{},
+ .stderr = &.{},
+ .stdout_null = true,
+ .stderr_null = true,
+ .test_results = .{
+ .test_count = test_count,
+ .fail_count = fail_count,
+ .skip_count = skip_count,
+ .leak_count = leak_count,
},
+ };
+}
+
+const TestMetadata = struct {
+ names: []const u32,
+ async_frame_lens: []const u32,
+ expected_panic_msgs: []const u32,
+ string_bytes: []const u8,
+ next_index: u32,
+ prog_node: *std.Progress.Node,
+
+ fn testName(tm: TestMetadata, index: u32) []const u8 {
+ return std.mem.sliceTo(tm.string_bytes[tm.names[index]..], 0);
+ }
+};
+
+fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void {
+ while (metadata.next_index < metadata.names.len) {
+ const i = metadata.next_index;
+ metadata.next_index += 1;
+
+ if (metadata.async_frame_lens[i] != 0) continue;
+ if (metadata.expected_panic_msgs[i] != 0) continue;
+
+ const name = metadata.testName(i);
+ if (sub_prog_node.*) |*n| n.end();
+ sub_prog_node.* = metadata.prog_node.start(name, 0);
+
+ try sendRunTestMessage(in, i);
+ return;
+ } else {
+ try sendMessage(in, .exit);
}
}
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
+fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = tag,
+ .bytes_len = 0,
+ };
+ try file.writeAll(std.mem.asBytes(&header));
}
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
+fn sendRunTestMessage(file: std.fs.File, index: u32) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = .run_test,
+ .bytes_len = 4,
+ };
+ const full_msg = std.mem.asBytes(&header) ++ std.mem.asBytes(&index);
+ try file.writeAll(full_msg);
}
-fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
- if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
- for (argv) |arg| {
- std.debug.print("{s} ", .{arg});
+fn evalGeneric(self: *RunStep, child: *std.process.Child) !StdIoResult {
+ const arena = self.step.owner.allocator;
+
+ if (self.stdin) |stdin| {
+ child.stdin.?.writeAll(stdin) catch |err| {
+ return self.step.fail("unable to write stdin: {s}", .{@errorName(err)});
+ };
+ child.stdin.?.close();
+ child.stdin = null;
}
- std.debug.print("\n", .{});
-}
-fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
- addPathForDynLibsInternal(&self.step, self.builder, artifact);
+ // These are not optionals, as a workaround for
+ // https://github.com/ziglang/zig/issues/14783
+ var stdout_bytes: []const u8 = undefined;
+ var stderr_bytes: []const u8 = undefined;
+ var stdout_null = true;
+ var stderr_null = true;
+
+ if (child.stdout) |stdout| {
+ if (child.stderr) |stderr| {
+ var poller = std.io.poll(arena, enum { stdout, stderr }, .{
+ .stdout = stdout,
+ .stderr = stderr,
+ });
+ defer poller.deinit();
+
+ while (try poller.poll()) {
+ if (poller.fifo(.stdout).count > self.max_stdio_size)
+ return error.StdoutStreamTooLong;
+ if (poller.fifo(.stderr).count > self.max_stdio_size)
+ return error.StderrStreamTooLong;
+ }
+
+ stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
+ stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
+ stdout_null = false;
+ stderr_null = false;
+ } else {
+ stdout_bytes = try stdout.reader().readAllAlloc(arena, self.max_stdio_size);
+ stdout_null = false;
+ }
+ } else if (child.stderr) |stderr| {
+ stderr_bytes = try stderr.reader().readAllAlloc(arena, self.max_stdio_size);
+ stderr_null = false;
+ }
+
+ if (!stderr_null and stderr_bytes.len > 0) {
+ // Treat stderr as an error message.
+ const stderr_is_diagnostic = self.captured_stderr == null and switch (self.stdio) {
+ .check => |checks| !checksContainStderr(checks.items),
+ else => true,
+ };
+ if (stderr_is_diagnostic) {
+ try self.step.result_error_msgs.append(arena, stderr_bytes);
+ }
+ }
+
+ return .{
+ .stdout = stdout_bytes,
+ .stderr = stderr_bytes,
+ .stdout_null = stdout_null,
+ .stderr_null = stderr_null,
+ .test_results = .{},
+ };
}
-/// This should only be used for internal usage, this is called automatically
-/// for the user.
-pub fn addPathForDynLibsInternal(step: *Step, builder: *std.Build, artifact: *CompileStep) void {
+fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
+ const b = self.step.owner;
for (artifact.link_objects.items) |link_object| {
switch (link_object) {
.other_step => |other| {
if (other.target.isWindows() and other.isDynamicLibrary()) {
- addPathDirInternal(step, builder, fs.path.dirname(other.getOutputSource().getPath(builder)).?);
- addPathForDynLibsInternal(step, builder, other);
+ addPathDir(self, fs.path.dirname(other.getOutputSource().getPath(b)).?);
+ addPathForDynLibs(self, other);
}
},
else => {},
}
}
}
+
+fn failForeign(
+ self: *RunStep,
+ suggested_flag: []const u8,
+ argv0: []const u8,
+ exe: *CompileStep,
+) error{ MakeFailed, MakeSkipped, OutOfMemory } {
+ switch (self.stdio) {
+ .check, .zig_test => {
+ if (self.skip_foreign_checks)
+ return error.MakeSkipped;
+
+ const b = self.step.owner;
+ const host_name = try b.host.target.zigTriple(b.allocator);
+ const foreign_name = try exe.target.zigTriple(b.allocator);
+
+ return self.step.fail(
+ \\unable to spawn foreign binary '{s}' ({s}) on host system ({s})
+ \\ consider using {s} or enabling skip_foreign_checks in the Run step
+ , .{ argv0, foreign_name, host_name, suggested_flag });
+ },
+ else => {
+ return self.step.fail("unable to spawn foreign binary '{s}'", .{argv0});
+ },
+ }
+}
+
+fn hashStdIo(hh: *std.Build.Cache.HashHelper, stdio: StdIo) void {
+ switch (stdio) {
+ .infer_from_args, .inherit, .zig_test => {},
+ .check => |checks| for (checks.items) |check| {
+ hh.add(@as(std.meta.Tag(StdIo.Check), check));
+ switch (check) {
+ .expect_stderr_exact,
+ .expect_stderr_match,
+ .expect_stdout_exact,
+ .expect_stdout_match,
+ => |s| hh.addBytes(s),
+
+ .expect_term => |term| {
+ hh.add(@as(std.meta.Tag(std.process.Child.Term), term));
+ switch (term) {
+ .Exited => |x| hh.add(x),
+ .Signal, .Stopped, .Unknown => |x| hh.add(x),
+ }
+ },
+ }
+ },
+ }
+}
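
For orientation, the placeholder handling in RunStep.make above joins an optional prefix with the cache-relative output path to form a single argv entry. A minimal sketch of that assembly, outside the diff, with a hypothetical prefix and output path:

const std = @import("std");

pub fn main() !void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    // Hypothetical stand-ins for Output.prefix and the joined cache path.
    const prefix: []const u8 = "-femit-bin=";
    const output_path: []const u8 = "zig-cache/o/1234abcd/foo.bin";

    // Mirrors the cli_arg logic: a bare path when there is no prefix,
    // otherwise prefix and path concatenated into one argument.
    const cli_arg = if (prefix.len == 0)
        output_path
    else
        try std.fmt.allocPrint(arena, "{s}{s}", .{ prefix, output_path });

    std.debug.print("{s}\n", .{cli_arg}); // -femit-bin=zig-cache/o/1234abcd/foo.bin
}
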
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 82c39ac2cc..0c05a64b1c 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -1,9 +1,77 @@
id: Id,
name: []const u8,
-makeFn: *const fn (self: *Step) anyerror!void,
+owner: *Build,
+makeFn: MakeFn,
+
dependencies: std.ArrayList(*Step),
-loop_flag: bool,
-done_flag: bool,
+/// This field is empty during execution of the user's build script, and
+/// then populated during dependency loop checking in the build runner.
+dependants: std.ArrayListUnmanaged(*Step),
+state: State,
+/// Set this field to declare an upper bound on the number of bytes of memory
+/// it will take to run the step. Zero means no limit.
+///
+/// The idea is to annotate steps that might use a high amount of RAM with an
+/// upper bound. For example, perhaps a particular set of unit tests require 4
+/// GiB of RAM, and those tests will be run under 4 different build
+/// configurations at once. This would potentially require 16 GiB of memory on
+/// the system if all 4 steps executed simultaneously, which could easily be
+/// greater than what is actually available, potentially causing the system to
+/// crash when using `zig build` at the default concurrency level.
+///
+/// This field causes the build runner to do two things:
+/// 1. ulimit child processes, so that they will fail if they would exceed this
+/// memory limit. This serves to enforce that this upper bound value is
+/// correct.
+/// 2. Ensure that the set of concurrent steps at any given time have a total
+/// max_rss value that does not exceed the `max_total_rss` value of the build
+/// runner. This value is configurable on the command line, and defaults to the
+/// total system memory available.
+max_rss: usize,
+
+result_error_msgs: std.ArrayListUnmanaged([]const u8),
+result_error_bundle: std.zig.ErrorBundle,
+result_cached: bool,
+result_duration_ns: ?u64,
+/// 0 means unavailable or not reported.
+result_peak_rss: usize,
+test_results: TestResults,
+
+/// The return address associated with creation of this step, which can be
+/// useful to print along with debugging messages.
+debug_stack_trace: [n_debug_stack_frames]usize,
+
+pub const TestResults = struct {
+ fail_count: u32 = 0,
+ skip_count: u32 = 0,
+ leak_count: u32 = 0,
+ test_count: u32 = 0,
+
+ pub fn isSuccess(tr: TestResults) bool {
+ return tr.fail_count == 0 and tr.leak_count == 0;
+ }
+
+ pub fn passCount(tr: TestResults) u32 {
+ return tr.test_count - tr.fail_count - tr.skip_count;
+ }
+};
+
+pub const MakeFn = *const fn (self: *Step, prog_node: *std.Progress.Node) anyerror!void;
+
+const n_debug_stack_frames = 4;
+
+pub const State = enum {
+ precheck_unstarted,
+ precheck_started,
+ precheck_done,
+ running,
+ dependency_failure,
+ success,
+ failure,
+    /// This state indicates that the step did not complete; however, it also did
+    /// not fail, and it is safe to continue executing its dependencies.
+ skipped,
+};
pub const Id = enum {
top_level,
@@ -11,13 +79,11 @@ pub const Id = enum {
install_artifact,
install_file,
install_dir,
- log,
remove_dir,
fmt,
translate_c,
write_file,
run,
- emulatable_run,
check_file,
check_object,
config_header,
@@ -32,13 +98,11 @@ pub const Id = enum {
.install_artifact => Build.InstallArtifactStep,
.install_file => Build.InstallFileStep,
.install_dir => Build.InstallDirStep,
- .log => Build.LogStep,
.remove_dir => Build.RemoveDirStep,
.fmt => Build.FmtStep,
.translate_c => Build.TranslateCStep,
.write_file => Build.WriteFileStep,
.run => Build.RunStep,
- .emulatable_run => Build.EmulatableRunStep,
.check_file => Build.CheckFileStep,
.check_object => Build.CheckObjectStep,
.config_header => Build.ConfigHeaderStep,
@@ -49,39 +113,99 @@ pub const Id = enum {
}
};
-pub fn init(
+pub const Options = struct {
id: Id,
name: []const u8,
- allocator: Allocator,
- makeFn: *const fn (self: *Step) anyerror!void,
-) Step {
- return Step{
- .id = id,
- .name = allocator.dupe(u8, name) catch @panic("OOM"),
- .makeFn = makeFn,
- .dependencies = std.ArrayList(*Step).init(allocator),
- .loop_flag = false,
- .done_flag = false,
+ owner: *Build,
+ makeFn: MakeFn = makeNoOp,
+ first_ret_addr: ?usize = null,
+ max_rss: usize = 0,
+};
+
+pub fn init(options: Options) Step {
+ const arena = options.owner.allocator;
+
+ var addresses = [1]usize{0} ** n_debug_stack_frames;
+ const first_ret_addr = options.first_ret_addr orelse @returnAddress();
+ var stack_trace = std.builtin.StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
};
-}
+ std.debug.captureStackTrace(first_ret_addr, &stack_trace);
-pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
- return init(id, name, allocator, makeNoOp);
+ return .{
+ .id = options.id,
+ .name = arena.dupe(u8, options.name) catch @panic("OOM"),
+ .owner = options.owner,
+ .makeFn = options.makeFn,
+ .dependencies = std.ArrayList(*Step).init(arena),
+ .dependants = .{},
+ .state = .precheck_unstarted,
+ .max_rss = options.max_rss,
+ .debug_stack_trace = addresses,
+ .result_error_msgs = .{},
+ .result_error_bundle = std.zig.ErrorBundle.empty,
+ .result_cached = false,
+ .result_duration_ns = null,
+ .result_peak_rss = 0,
+ .test_results = .{},
+ };
}
-pub fn make(self: *Step) !void {
- if (self.done_flag) return;
+/// If the Step's `make` function reports `error.MakeFailed`, it indicates that
+/// the error has already been reported. Otherwise, we add a simple error report
+/// here.
+pub fn make(s: *Step, prog_node: *std.Progress.Node) error{ MakeFailed, MakeSkipped }!void {
+ const arena = s.owner.allocator;
+
+ s.makeFn(s, prog_node) catch |err| switch (err) {
+ error.MakeFailed => return error.MakeFailed,
+ error.MakeSkipped => return error.MakeSkipped,
+ else => {
+ s.result_error_msgs.append(arena, @errorName(err)) catch @panic("OOM");
+ return error.MakeFailed;
+ },
+ };
+
+ if (!s.test_results.isSuccess()) {
+ return error.MakeFailed;
+ }
- try self.makeFn(self);
- self.done_flag = true;
+ if (s.max_rss != 0 and s.result_peak_rss > s.max_rss) {
+ const msg = std.fmt.allocPrint(arena, "memory usage peaked at {d} bytes, exceeding the declared upper bound of {d}", .{
+ s.result_peak_rss, s.max_rss,
+ }) catch @panic("OOM");
+ s.result_error_msgs.append(arena, msg) catch @panic("OOM");
+ return error.MakeFailed;
+ }
}
pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch @panic("OOM");
}
-fn makeNoOp(self: *Step) anyerror!void {
- _ = self;
+pub fn getStackTrace(s: *Step) std.builtin.StackTrace {
+ const stack_addresses = &s.debug_stack_trace;
+ var len: usize = 0;
+ while (len < n_debug_stack_frames and stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return .{
+ .instruction_addresses = stack_addresses,
+ .index = len,
+ };
+}
+
+fn makeNoOp(step: *Step, prog_node: *std.Progress.Node) anyerror!void {
+ _ = prog_node;
+
+ var all_cached = true;
+
+ for (step.dependencies.items) |dep| {
+ all_cached = all_cached and dep.result_cached;
+ }
+
+ step.result_cached = all_cached;
}
pub fn cast(step: *Step, comptime T: type) ?*T {
@@ -91,7 +215,323 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
return null;
}
+/// For debugging purposes, prints identifying information about this Step.
+pub fn dump(step: *Step) void {
+ std.debug.getStderrMutex().lock();
+ defer std.debug.getStderrMutex().unlock();
+
+ const stderr = std.io.getStdErr();
+ const w = stderr.writer();
+ const tty_config = std.debug.detectTTYConfig(stderr);
+ const debug_info = std.debug.getSelfDebugInfo() catch |err| {
+ w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
+ @errorName(err),
+ }) catch {};
+ return;
+ };
+ const ally = debug_info.allocator;
+ w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
+ std.debug.writeStackTrace(step.getStackTrace(), w, ally, debug_info, tty_config) catch |err| {
+ stderr.writer().print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch {};
+ return;
+ };
+}
+
const Step = @This();
const std = @import("../std.zig");
const Build = std.Build;
const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+
+pub fn evalChildProcess(s: *Step, argv: []const []const u8) !void {
+ const arena = s.owner.allocator;
+
+ try handleChildProcUnsupported(s, null, argv);
+ try handleVerbose(s.owner, null, argv);
+
+ const result = std.ChildProcess.exec(.{
+ .allocator = arena,
+ .argv = argv,
+ }) catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
+
+ if (result.stderr.len > 0) {
+ try s.result_error_msgs.append(arena, result.stderr);
+ }
+
+ try handleChildProcessTerm(s, result.term, null, argv);
+}
+
+pub fn fail(step: *Step, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, MakeFailed } {
+ try step.addError(fmt, args);
+ return error.MakeFailed;
+}
+
+pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
+ const arena = step.owner.allocator;
+ const msg = try std.fmt.allocPrint(arena, fmt, args);
+ try step.result_error_msgs.append(arena, msg);
+}
+
+/// Assumes that argv contains `--listen=-` and that the process being spawned
+/// is the zig compiler - the same version that compiled the build runner.
+pub fn evalZigProcess(
+ s: *Step,
+ argv: []const []const u8,
+ prog_node: *std.Progress.Node,
+) ![]const u8 {
+ assert(argv.len != 0);
+ const b = s.owner;
+ const arena = b.allocator;
+ const gpa = arena;
+
+ try handleChildProcUnsupported(s, null, argv);
+ try handleVerbose(s.owner, null, argv);
+
+ var child = std.ChildProcess.init(argv, arena);
+ child.env_map = b.env_map;
+ child.stdin_behavior = .Pipe;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = .Pipe;
+ child.request_resource_usage_statistics = true;
+
+ child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{
+ argv[0], @errorName(err),
+ });
+ var timer = try std.time.Timer.start();
+
+ var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ defer poller.deinit();
+
+ try sendMessage(child.stdin.?, .update);
+ try sendMessage(child.stdin.?, .exit);
+
+ const Header = std.zig.Server.Message.Header;
+ var result: ?[]const u8 = null;
+
+ var node_name: std.ArrayListUnmanaged(u8) = .{};
+ defer node_name.deinit(gpa);
+ var sub_prog_node = prog_node.start("", 0);
+ defer sub_prog_node.end();
+
+ const stdout = poller.fifo(.stdout);
+
+ poll: while (true) {
+ while (stdout.readableLength() < @sizeOf(Header)) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const header = stdout.reader().readStruct(Header) catch unreachable;
+ while (stdout.readableLength() < header.bytes_len) {
+ if (!(try poller.poll())) break :poll;
+ }
+ const body = stdout.readableSliceOfLen(header.bytes_len);
+
+ switch (header.tag) {
+ .zig_version => {
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
+ return s.fail(
+ "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
+ .{ builtin.zig_version_string, body },
+ );
+ }
+ },
+ .error_bundle => {
+ const EbHdr = std.zig.Server.Message.ErrorBundle;
+ const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
+ const extra_bytes =
+ body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
+ const string_bytes =
+ body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
+ // TODO: use @ptrCast when the compiler supports it
+ const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
+ const extra_array = try arena.alloc(u32, unaligned_extra.len);
+ // TODO: use @memcpy when it supports slices
+ for (extra_array, unaligned_extra) |*dst, src| dst.* = src;
+ s.result_error_bundle = .{
+ .string_bytes = try arena.dupe(u8, string_bytes),
+ .extra = extra_array,
+ };
+ },
+ .progress => {
+ node_name.clearRetainingCapacity();
+ try node_name.appendSlice(gpa, body);
+ sub_prog_node.setName(node_name.items);
+ },
+ .emit_bin_path => {
+ const EbpHdr = std.zig.Server.Message.EmitBinPath;
+ const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body);
+ s.result_cached = ebp_hdr.flags.cache_hit;
+ result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
+ },
+ else => {}, // ignore other messages
+ }
+
+ stdout.discard(body.len);
+ }
+
+ const stderr = poller.fifo(.stderr);
+ if (stderr.readableLength() > 0) {
+ try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
+ }
+
+ // Send EOF to stdin.
+ child.stdin.?.close();
+ child.stdin = null;
+
+ const term = child.wait() catch |err| {
+ return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(err) });
+ };
+ s.result_duration_ns = timer.read();
+ s.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0;
+
+ // Special handling for CompileStep that is expecting compile errors.
+ if (s.cast(Build.CompileStep)) |compile| switch (term) {
+ .Exited => {
+ // Note that the exit code may be 0 in this case due to the
+ // compiler server protocol.
+ if (compile.expect_errors.len != 0 and s.result_error_bundle.errorMessageCount() > 0) {
+ return error.NeedCompileErrorCheck;
+ }
+ },
+ else => {},
+ };
+
+ try handleChildProcessTerm(s, term, null, argv);
+
+ if (s.result_error_bundle.errorMessageCount() > 0) {
+ return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
+ s.result_error_bundle.errorMessageCount(),
+ try allocPrintCmd(arena, null, argv),
+ });
+ }
+
+ return result orelse return s.fail(
+ "the following command failed to communicate the compilation result:\n{s}",
+ .{try allocPrintCmd(arena, null, argv)},
+ );
+}
+
+fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+ const header: std.zig.Client.Message.Header = .{
+ .tag = tag,
+ .bytes_len = 0,
+ };
+ try file.writeAll(std.mem.asBytes(&header));
+}
+
+pub fn handleVerbose(
+ b: *Build,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{OutOfMemory}!void {
+ return handleVerbose2(b, opt_cwd, null, argv);
+}
+
+pub fn handleVerbose2(
+ b: *Build,
+ opt_cwd: ?[]const u8,
+ opt_env: ?*const std.process.EnvMap,
+ argv: []const []const u8,
+) error{OutOfMemory}!void {
+ if (b.verbose) {
+        // The intention of verbose mode is to print all sub-process command
+        // lines to stderr before spawning them.
+ const text = try allocPrintCmd2(b.allocator, opt_cwd, opt_env, argv);
+ std.debug.print("{s}\n", .{text});
+ }
+}
+
+pub inline fn handleChildProcUnsupported(
+ s: *Step,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{ OutOfMemory, MakeFailed }!void {
+ if (!std.process.can_spawn) {
+ return s.fail(
+ "unable to execute the following command: host cannot spawn child processes\n{s}",
+ .{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
+ );
+ }
+}
+
+pub fn handleChildProcessTerm(
+ s: *Step,
+ term: std.ChildProcess.Term,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) error{ MakeFailed, OutOfMemory }!void {
+ const arena = s.owner.allocator;
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ return s.fail(
+ "the following command exited with error code {d}:\n{s}",
+ .{ code, try allocPrintCmd(arena, opt_cwd, argv) },
+ );
+ }
+ },
+ .Signal, .Stopped, .Unknown => {
+ return s.fail(
+ "the following command terminated unexpectedly:\n{s}",
+ .{try allocPrintCmd(arena, opt_cwd, argv)},
+ );
+ },
+ }
+}
+
+pub fn allocPrintCmd(
+ arena: Allocator,
+ opt_cwd: ?[]const u8,
+ argv: []const []const u8,
+) Allocator.Error![]u8 {
+ return allocPrintCmd2(arena, opt_cwd, null, argv);
+}
+
+pub fn allocPrintCmd2(
+ arena: Allocator,
+ opt_cwd: ?[]const u8,
+ opt_env: ?*const std.process.EnvMap,
+ argv: []const []const u8,
+) Allocator.Error![]u8 {
+ var buf: std.ArrayListUnmanaged(u8) = .{};
+ if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd});
+ if (opt_env) |env| {
+ const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
+ var it = env.iterator();
+ while (it.next()) |entry| {
+ const key = entry.key_ptr.*;
+ const value = entry.value_ptr.*;
+ if (process_env_map.get(key)) |process_value| {
+ if (std.mem.eql(u8, value, process_value)) continue;
+ }
+ try buf.writer(arena).print("{s}={s} ", .{ key, value });
+ }
+ }
+ for (argv) |arg| {
+ try buf.writer(arena).print("{s} ", .{arg});
+ }
+ return buf.toOwnedSlice(arena);
+}
+
+pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool {
+ s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err);
+ return s.result_cached;
+}
+
+fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror {
+ const i = man.failed_file_index orelse return err;
+ const pp = man.files.items[i].prefixed_path orelse return err;
+ const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
+ return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, pp.sub_path });
+}
+
+pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void {
+ if (s.test_results.isSuccess()) {
+ man.writeManifest() catch |err| {
+ try s.addError("unable to write cache manifest: {s}", .{@errorName(err)});
+ };
+ }
+}
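
The reworked Step API above (Options-based init, the (step, prog_node) make signature, fail/addError, and the optional max_rss bound) is what the remaining per-step diffs migrate to. A minimal sketch of a third-party step written against it; the .custom id, the step's behavior, and the max_rss value are assumptions for illustration, not part of this change:

const std = @import("std");
const Step = std.Build.Step;

pub const HelloStep = struct {
    step: Step,
    message: []const u8,

    pub fn create(owner: *std.Build, message: []const u8) *HelloStep {
        const self = owner.allocator.create(HelloStep) catch @panic("OOM");
        self.* = .{
            .step = Step.init(.{
                .id = .custom, // assumed Step.Id tag for out-of-tree steps
                .name = owner.fmt("hello {s}", .{message}),
                .owner = owner,
                .makeFn = make,
                .max_rss = 16 * 1024 * 1024, // declared memory upper bound (see max_rss above)
            }),
            .message = owner.dupe(message),
        };
        return self;
    }

    fn make(step: *Step, prog_node: *std.Progress.Node) !void {
        _ = prog_node;
        const self = @fieldParentPtr(HelloStep, "step", step);
        if (self.message.len == 0) return step.fail("message must not be empty", .{});
        std.debug.print("{s}\n", .{self.message});
    }
};
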
diff --git a/lib/std/Build/TranslateCStep.zig b/lib/std/Build/TranslateCStep.zig
index fb0adfd0ae..0cfd5d85a8 100644
--- a/lib/std/Build/TranslateCStep.zig
+++ b/lib/std/Build/TranslateCStep.zig
@@ -11,7 +11,6 @@ const TranslateCStep = @This();
pub const base_id = .translate_c;
step: Step,
-builder: *std.Build,
source: std.Build.FileSource,
include_dirs: std.ArrayList([]const u8),
c_macros: std.ArrayList([]const u8),
@@ -26,15 +25,19 @@ pub const Options = struct {
optimize: std.builtin.OptimizeMode,
};
-pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
- const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
- const source = options.source_file.dupe(builder);
+pub fn create(owner: *std.Build, options: Options) *TranslateCStep {
+ const self = owner.allocator.create(TranslateCStep) catch @panic("OOM");
+ const source = options.source_file.dupe(owner);
self.* = TranslateCStep{
- .step = Step.init(.translate_c, "translate-c", builder.allocator, make),
- .builder = builder,
+ .step = Step.init(.{
+ .id = .translate_c,
+ .name = "translate-c",
+ .owner = owner,
+ .makeFn = make,
+ }),
.source = source,
- .include_dirs = std.ArrayList([]const u8).init(builder.allocator),
- .c_macros = std.ArrayList([]const u8).init(builder.allocator),
+ .include_dirs = std.ArrayList([]const u8).init(owner.allocator),
+ .c_macros = std.ArrayList([]const u8).init(owner.allocator),
.out_basename = undefined,
.target = options.target,
.optimize = options.optimize,
@@ -54,7 +57,7 @@ pub const AddExecutableOptions = struct {
/// Creates a step to build an executable from the translated source.
pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
- return self.builder.addExecutable(.{
+ return self.step.owner.addExecutable(.{
.root_source_file = .{ .generated = &self.output_file },
.name = options.name orelse "translated_c",
.version = options.version,
@@ -65,43 +68,49 @@ pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *Comp
}
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
- self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
+ self.include_dirs.append(self.step.owner.dupePath(include_dir)) catch @panic("OOM");
}
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
- return CheckFileStep.create(self.builder, .{ .generated = &self.output_file }, self.builder.dupeStrings(expected_matches));
+ return CheckFileStep.create(
+ self.step.owner,
+ .{ .generated = &self.output_file },
+ .{ .expected_matches = expected_matches },
+ );
}
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
- const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ const macro = std.Build.constructCMacro(self.step.owner.allocator, name, value);
self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
+ self.c_macros.append(self.step.owner.dupe(name_and_value)) catch @panic("OOM");
}
-fn make(step: *Step) !void {
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ const b = step.owner;
const self = @fieldParentPtr(TranslateCStep, "step", step);
- var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
- try argv_list.append(self.builder.zig_exe);
+ var argv_list = std.ArrayList([]const u8).init(b.allocator);
+ try argv_list.append(b.zig_exe);
try argv_list.append("translate-c");
try argv_list.append("-lc");
try argv_list.append("--enable-cache");
+ try argv_list.append("--listen=-");
if (!self.target.isNative()) {
try argv_list.append("-target");
- try argv_list.append(try self.target.zigTriple(self.builder.allocator));
+ try argv_list.append(try self.target.zigTriple(b.allocator));
}
switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
- else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ else => try argv_list.append(b.fmt("-O{s}", .{@tagName(self.optimize)})),
}
for (self.include_dirs.items) |include_dir| {
@@ -114,16 +123,15 @@ fn make(step: *Step) !void {
try argv_list.append(c_macro);
}
- try argv_list.append(self.source.getPath(self.builder));
+ try argv_list.append(self.source.getPath(b));
- const output_path_nl = try self.builder.execFromStep(argv_list.items, &self.step);
- const output_path = mem.trimRight(u8, output_path_nl, "\r\n");
+ const output_path = try step.evalZigProcess(argv_list.items, prog_node);
self.out_basename = fs.path.basename(output_path);
const output_dir = fs.path.dirname(output_path).?;
self.output_file.path = try fs.path.join(
- self.builder.allocator,
+ b.allocator,
&[_][]const u8{ output_dir, self.out_basename },
);
}
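
With --listen=- added to the argument list above, TranslateCStep no longer trims a path off the child's stdout; step.evalZigProcess drives the compiler over its message protocol and returns the emitted path directly. The spawned command ends up looking roughly like the following, with any include directories and macros appended; the target, optimize mode, and source path here are hypothetical:

zig translate-c -lc --enable-cache --listen=- -target aarch64-linux-gnu -OReleaseSafe src/foo.h
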
diff --git a/lib/std/Build/WriteFileStep.zig b/lib/std/Build/WriteFileStep.zig
index 3a30aba190..dee79af5be 100644
--- a/lib/std/Build/WriteFileStep.zig
+++ b/lib/std/Build/WriteFileStep.zig
@@ -10,11 +10,11 @@
//! control.
step: Step,
-builder: *std.Build,
/// The elements here are pointers because we need stable pointers for the
/// GeneratedFile field.
files: std.ArrayListUnmanaged(*File),
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
+generated_directory: std.Build.GeneratedFile,
pub const base_id = .write_file;
@@ -34,24 +34,34 @@ pub const Contents = union(enum) {
copy: std.Build.FileSource,
};
-pub fn init(builder: *std.Build) WriteFileStep {
- return .{
- .builder = builder,
- .step = Step.init(.write_file, "writefile", builder.allocator, make),
+pub fn create(owner: *std.Build) *WriteFileStep {
+ const wf = owner.allocator.create(WriteFileStep) catch @panic("OOM");
+ wf.* = .{
+ .step = Step.init(.{
+ .id = .write_file,
+ .name = "WriteFile",
+ .owner = owner,
+ .makeFn = make,
+ }),
.files = .{},
.output_source_files = .{},
+ .generated_directory = .{ .step = &wf.step },
};
+ return wf;
}
pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
- const gpa = wf.builder.allocator;
+ const b = wf.step.owner;
+ const gpa = b.allocator;
const file = gpa.create(File) catch @panic("OOM");
file.* = .{
.generated_file = .{ .step = &wf.step },
- .sub_path = wf.builder.dupePath(sub_path),
- .contents = .{ .bytes = wf.builder.dupe(bytes) },
+ .sub_path = b.dupePath(sub_path),
+ .contents = .{ .bytes = b.dupe(bytes) },
};
wf.files.append(gpa, file) catch @panic("OOM");
+
+ wf.maybeUpdateName();
}
/// Place the file into the generated directory within the local cache,
@@ -62,14 +72,18 @@ pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
/// required sub-path exists.
/// This is the option expected to be used most commonly with `addCopyFile`.
pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
- const gpa = wf.builder.allocator;
+ const b = wf.step.owner;
+ const gpa = b.allocator;
const file = gpa.create(File) catch @panic("OOM");
file.* = .{
.generated_file = .{ .step = &wf.step },
- .sub_path = wf.builder.dupePath(sub_path),
+ .sub_path = b.dupePath(sub_path),
.contents = .{ .copy = source },
};
wf.files.append(gpa, file) catch @panic("OOM");
+
+ wf.maybeUpdateName();
+ source.addStepDependencies(&wf.step);
}
/// A path relative to the package root.
@@ -79,10 +93,26 @@ pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: [
/// those changes to version control.
/// A file added this way is not available with `getFileSource`.
pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
- wf.output_source_files.append(wf.builder.allocator, .{
+ const b = wf.step.owner;
+ wf.output_source_files.append(b.allocator, .{
.contents = .{ .copy = source },
.sub_path = sub_path,
}) catch @panic("OOM");
+ source.addStepDependencies(&wf.step);
+}
+
+/// A path relative to the package root.
+/// Be careful with this because it updates source files. This should not be
+/// used as part of the normal build process, but as a utility occasionally
+/// run by a developer with intent to modify source files and then commit
+/// those changes to version control.
+/// A file added this way is not available with `getFileSource`.
+pub fn addBytesToSource(wf: *WriteFileStep, bytes: []const u8, sub_path: []const u8) void {
+ const b = wf.step.owner;
+ wf.output_source_files.append(b.allocator, .{
+ .contents = .{ .bytes = bytes },
+ .sub_path = sub_path,
+ }) catch @panic("OOM");
}
/// Gets a file source for the given sub_path. If the file does not exist, returns `null`.
@@ -95,21 +125,63 @@ pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSo
return null;
}
-fn make(step: *Step) !void {
+/// Returns a `FileSource` representing the base directory that contains all the
+/// files from this `WriteFileStep`.
+pub fn getDirectorySource(wf: *WriteFileStep) std.Build.FileSource {
+ return .{ .generated = &wf.generated_directory };
+}
+
+fn maybeUpdateName(wf: *WriteFileStep) void {
+ if (wf.files.items.len == 1) {
+ // First time adding a file; update name.
+ if (std.mem.eql(u8, wf.step.name, "WriteFile")) {
+ wf.step.name = wf.step.owner.fmt("WriteFile {s}", .{wf.files.items[0].sub_path});
+ }
+ }
+}
+
+fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+ _ = prog_node;
+ const b = step.owner;
const wf = @fieldParentPtr(WriteFileStep, "step", step);
// Writing to source files is kind of an extra capability of this
// WriteFileStep - arguably it should be a different step. But anyway here
// it is, it happens unconditionally and does not interact with the other
// files here.
+ var any_miss = false;
for (wf.output_source_files.items) |output_source_file| {
- const basename = fs.path.basename(output_source_file.sub_path);
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
- var dir = try wf.builder.build_root.handle.makeOpenPath(dirname, .{});
- defer dir.close();
- try writeFile(wf, dir, output_source_file.contents, basename);
- } else {
- try writeFile(wf, wf.builder.build_root.handle, output_source_file.contents, basename);
+ b.build_root.handle.makePath(dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.build_root, dirname, @errorName(err),
+ });
+ };
+ }
+ switch (output_source_file.contents) {
+ .bytes => |bytes| {
+ b.build_root.handle.writeFile(output_source_file.sub_path, bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}': {s}", .{
+ b.build_root, output_source_file.sub_path, @errorName(err),
+ });
+ };
+ any_miss = true;
+ },
+ .copy => |file_source| {
+ const source_path = file_source.getPath(b);
+ const prev_status = fs.Dir.updateFile(
+ fs.cwd(),
+ source_path,
+ b.build_root.handle,
+ output_source_file.sub_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{
+ source_path, b.build_root, output_source_file.sub_path, @errorName(err),
+ });
+ };
+ any_miss = any_miss or prev_status == .stale;
+ },
}
}
@@ -120,7 +192,7 @@ fn make(step: *Step) !void {
// If, for example, a hard-coded path was used as the location to put WriteFileStep
// files, then two WriteFileSteps executing in parallel might clobber each other.
- var man = wf.builder.cache.obtain();
+ var man = b.cache.obtain();
defer man.deinit();
// Random bytes to make WriteFileStep unique. Refresh this with
@@ -135,76 +207,82 @@ fn make(step: *Step) !void {
man.hash.addBytes(bytes);
},
.copy => |file_source| {
- _ = try man.addFile(file_source.getPath(wf.builder), null);
+ _ = try man.addFile(file_source.getPath(b), null);
},
}
}
- if (man.hit() catch |err| failWithCacheError(man, err)) {
- // Cache hit, skip writing file data.
+ if (try step.cacheHit(&man)) {
const digest = man.final();
for (wf.files.items) |file| {
- file.generated_file.path = try wf.builder.cache_root.join(
- wf.builder.allocator,
- &.{ "o", &digest, file.sub_path },
- );
+ file.generated_file.path = try b.cache_root.join(b.allocator, &.{
+ "o", &digest, file.sub_path,
+ });
}
+ wf.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
return;
}
const digest = man.final();
const cache_path = "o" ++ fs.path.sep_str ++ digest;
- var cache_dir = wf.builder.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
- return err;
+ wf.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
+
+ var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, cache_path, @errorName(err),
+ });
};
defer cache_dir.close();
for (wf.files.items) |file| {
- const basename = fs.path.basename(file.sub_path);
if (fs.path.dirname(file.sub_path)) |dirname| {
- var dir = try wf.builder.cache_root.handle.makeOpenPath(dirname, .{});
- defer dir.close();
- try writeFile(wf, dir, file.contents, basename);
- } else {
- try writeFile(wf, cache_dir, file.contents, basename);
+ cache_dir.makePath(dirname) catch |err| {
+ return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{
+ b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err),
+ });
+ };
+ }
+ switch (file.contents) {
+ .bytes => |bytes| {
+ cache_dir.writeFile(file.sub_path, bytes) catch |err| {
+ return step.fail("unable to write file '{}{s}{c}{s}': {s}", .{
+ b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err),
+ });
+ };
+ },
+ .copy => |file_source| {
+ const source_path = file_source.getPath(b);
+ const prev_status = fs.Dir.updateFile(
+ fs.cwd(),
+ source_path,
+ cache_dir,
+ file.sub_path,
+ .{},
+ ) catch |err| {
+ return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{
+ source_path,
+ b.cache_root,
+ cache_path,
+ fs.path.sep,
+ file.sub_path,
+ @errorName(err),
+ });
+ };
+ // At this point the step will already be marked as a cache miss.
+ // But this is effectively a partial cache hit, since individual
+ // file copies may still be avoided. That information is currently
+ // discarded.
+ _ = prev_status;
+ },
}
- file.generated_file.path = try wf.builder.cache_root.join(
- wf.builder.allocator,
- &.{ cache_path, file.sub_path },
- );
- }
-
- try man.writeManifest();
-}
-
-fn writeFile(wf: *WriteFileStep, dir: fs.Dir, contents: Contents, basename: []const u8) !void {
- // TODO after landing concurrency PR, improve error reporting here
- switch (contents) {
- .bytes => |bytes| return dir.writeFile(basename, bytes),
- .copy => |file_source| {
- const source_path = file_source.getPath(wf.builder);
- const prev_status = try fs.Dir.updateFile(fs.cwd(), source_path, dir, basename, .{});
- _ = prev_status; // TODO logging (affected by open PR regarding concurrency)
- },
+ file.generated_file.path = try b.cache_root.join(b.allocator, &.{
+ cache_path, file.sub_path,
+ });
}
-}
-
-/// TODO consolidate this with the same function in RunStep?
-/// Also properly deal with concurrency (see open PR)
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
- const i = man.failed_file_index orelse failWithSimpleError(err);
- const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
- const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
- std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
- std.process.exit(1);
-}
-fn failWithSimpleError(err: anyerror) noreturn {
- std.debug.print("{s}\n", .{@errorName(err)});
- std.process.exit(1);
+ try step.writeManifest(&man);
}
const std = @import("../std.zig");
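// Hedged illustration (not taken from this patch): fs.Dir.updateFile, used by
// both .copy branches above, only copies when the destination is missing or
// out of date, and reports which case occurred. The directory handles and the
// file name below are hypothetical.
const std = @import("std");
const fs = std.fs;

fn copyIfStale(source_dir: fs.Dir, dest_dir: fs.Dir) !void {
    const prev_status = try fs.Dir.updateFile(source_dir, "input.txt", dest_dir, "input.txt", .{});
    switch (prev_status) {
        // The destination was missing or older than the source, so it was copied.
        .stale => std.debug.print("copied\n", .{}),
        // The destination was already up to date; nothing was written.
        .fresh => std.debug.print("already up to date\n", .{}),
    }
}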