aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2023-02-05 19:39:04 -0700
committerAndrew Kelley <andrew@ziglang.org>2023-02-13 06:42:25 -0700
commit9cb52ca6ce7043ba0ce08d5650ac542075f10685 (patch)
tree272ce33129d65e8af6f068fb620ac5a3d33370fc /src
parent2654d0c66860d32714e33404554482cbc0cbabf5 (diff)
downloadzig-9cb52ca6ce7043ba0ce08d5650ac542075f10685.tar.gz
zig-9cb52ca6ce7043ba0ce08d5650ac542075f10685.zip
move the cache system from compiler to std lib
Diffstat (limited to 'src')
-rw-r--r--src/Cache.zig1265
-rw-r--r--src/Compilation.zig92
-rw-r--r--src/DepTokenizer.zig1069
-rw-r--r--src/Module.zig2
-rw-r--r--src/Package.zig2
-rw-r--r--src/glibc.zig2
-rw-r--r--src/link.zig2
-rw-r--r--src/link/Coff/lld.zig2
-rw-r--r--src/link/Elf.zig2
-rw-r--r--src/link/MachO.zig2
-rw-r--r--src/link/MachO/zld.zig2
-rw-r--r--src/link/Wasm.zig2
-rw-r--r--src/main.zig4
-rw-r--r--src/mingw.zig2
14 files changed, 54 insertions, 2396 deletions
diff --git a/src/Cache.zig b/src/Cache.zig
deleted file mode 100644
index 3020f8e8c6..0000000000
--- a/src/Cache.zig
+++ /dev/null
@@ -1,1265 +0,0 @@
//! Manages `zig-cache` directories.
//! This is not a general-purpose cache. It is designed to be fast and simple,
//! not to withstand attacks using specially-crafted input.

/// Allocator used for all cache bookkeeping (manifest entries, duplicated paths).
gpa: Allocator,
/// Open handle to the directory where manifest (`.txt`) files are stored.
manifest_dir: fs.Dir,
/// Base hash state; copied into every `Manifest` produced by `obtain`.
hash: HashHelper = .{},
/// This value is accessed from multiple threads, protected by mutex.
recent_problematic_timestamp: i128 = 0,
/// Protects `recent_problematic_timestamp`.
mutex: std.Thread.Mutex = .{},

/// A set of strings such as the zig library directory or project source root, which
/// are stripped from the file paths before putting into the cache. They
/// are replaced with single-character indicators. This is not to save
/// space but to eliminate absolute file paths. This improves portability
/// and usefulness of the cache for advanced use cases.
prefixes_buffer: [3]Compilation.Directory = undefined,
prefixes_len: usize = 0,
-
-const Cache = @This();
-const std = @import("std");
-const builtin = @import("builtin");
-const crypto = std.crypto;
-const fs = std.fs;
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const fmt = std.fmt;
-const Allocator = std.mem.Allocator;
-const Compilation = @import("Compilation.zig");
-const log = std.log.scoped(.cache);
-
/// Register `directory` as the next path prefix. At most 3 prefixes fit in
/// `prefixes_buffer`; exceeding that trips a bounds check in safe builds.
pub fn addPrefix(cache: *Cache, directory: Compilation.Directory) void {
    const index = cache.prefixes_len;
    if (directory.path) |p| log.debug("Cache.addPrefix {d} {s}", .{ index, p });
    cache.prefixes_buffer[index] = directory;
    cache.prefixes_len = index + 1;
}
-
/// Start a new manifest transaction, seeded with this cache's base hash state.
/// Be sure to call `Manifest.deinit` after successful initialization.
pub fn obtain(cache: *Cache) Manifest {
    const manifest: Manifest = .{
        .cache = cache,
        .hash = cache.hash,
        .manifest_file = null,
        .manifest_dirty = false,
        .hex_digest = undefined,
    };
    return manifest;
}
-
/// Returns the slice of prefixes registered so far via `addPrefix`.
pub fn prefixes(cache: *const Cache) []const Compilation.Directory {
    const count = cache.prefixes_len;
    return cache.prefixes_buffer[0..count];
}
-
/// A file path split into an index into the cache's prefix list and the path
/// relative to that prefix. Prefix 0 is the "null prefix": `sub_path` holds the
/// full resolved path (see `findPrefixResolved`).
const PrefixedPath = struct {
    prefix: u8,
    sub_path: []u8,
};
-
/// Resolve `file_path` and classify it against the registered prefixes.
/// On success the caller owns `PrefixedPath.sub_path`.
fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath {
    const allocator = cache.gpa;
    const resolved = try fs.path.resolve(allocator, &[_][]const u8{file_path});
    errdefer allocator.free(resolved);
    return cache.findPrefixResolved(resolved);
}
-
/// Takes ownership of `resolved_path` on success.
/// Matches `resolved_path` against the registered prefix directories and, on a
/// match, returns the path relative to that prefix (freeing `resolved_path`).
/// Falls back to prefix 0 (the null prefix) with the full resolved path.
fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
    const gpa = cache.gpa;
    const prefixes_slice = cache.prefixes();
    var i: u8 = 1; // Start at 1 to skip over checking the null prefix.
    while (i < prefixes_slice.len) : (i += 1) {
        const p = prefixes_slice[i].path.?;
        // A prefix only matches when it is followed by a path separator;
        // a bare startsWith would let "/a/b" wrongly claim files under "/a/bc".
        // Requiring a strictly longer path also avoids slicing out of bounds
        // at `p.len + 1` when `resolved_path` equals the prefix exactly.
        if (resolved_path.len > p.len and
            mem.startsWith(u8, resolved_path, p) and
            resolved_path[p.len] == fs.path.sep)
        {
            // +1 to skip over the path separator here
            const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]);
            gpa.free(resolved_path);
            return PrefixedPath{
                .prefix = i,
                .sub_path = sub_path,
            };
        } else {
            log.debug("'{s}' does not start with '{s}'", .{ resolved_path, p });
        }
    }

    return PrefixedPath{
        .prefix = 0,
        .sub_path = resolved_path,
    };
}
-
/// This is 128 bits - Even with 2^54 cache entries, the probability of a collision would be under 10^-6
pub const bin_digest_len = 16;
/// Length of the lowercase-hex rendering of a binary digest.
pub const hex_digest_len = bin_digest_len * 2;
pub const BinDigest = [bin_digest_len]u8;

// Hard cap on how many bytes of a manifest (or dep) file we will load into memory.
const manifest_file_size_max = 50 * 1024 * 1024;

/// The type used for hashing file contents. Currently, this is SipHash128(1, 3), because it
/// provides enough collision resistance for the Manifest use cases, while being one of our
/// fastest options right now.
pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);

/// Initial state, that can be copied. Keyed with all zeroes: this is a
/// fingerprint, not a MAC, so a secret key is unnecessary.
pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);
-
/// One tracked input file of a `Manifest`.
pub const File = struct {
    /// Null for entries freshly created while parsing a manifest line, before
    /// the path has been read (see `Manifest.hit`).
    prefixed_path: ?PrefixedPath,
    /// When non-null, `populateFileHash` loads the contents into memory
    /// (failing with error.FileTooBig past this limit); when null, contents
    /// are hashed from disk without being retained.
    max_file_size: ?usize,
    stat: Stat,
    bin_digest: BinDigest,
    /// Owned by `gpa`; set only when `max_file_size` was provided.
    contents: ?[]const u8,

    pub const Stat = struct {
        inode: fs.File.INode,
        size: u64,
        /// Nanosecond mtime; forced to 0 (along with inode) when the timestamp
        /// is deemed unreliable by `isProblematicTimestamp`.
        mtime: i128,
    };

    /// Frees owned memory and poisons the entry. Safe to call more than once
    /// only via fresh copies, since the struct is set to undefined.
    pub fn deinit(self: *File, gpa: Allocator) void {
        if (self.prefixed_path) |pp| {
            gpa.free(pp.sub_path);
            self.prefixed_path = null;
        }
        if (self.contents) |contents| {
            gpa.free(contents);
            self.contents = null;
        }
        self.* = undefined;
    }
};
-
/// Incremental helper around `Hasher` that knows how to fold common value
/// shapes (bytes, optionals, versions, lists) into the digest. The order in
/// which values are added defines the cache key, so callers must be consistent.
pub const HashHelper = struct {
    hasher: Hasher = hasher_init,

    const EmitLoc = Compilation.EmitLoc;

    /// Record a slice of bytes as a dependency of the process being cached.
    /// The length is hashed first, so consecutive slices encode unambiguously.
    pub fn addBytes(hh: *HashHelper, bytes: []const u8) void {
        hh.hasher.update(mem.asBytes(&bytes.len));
        hh.hasher.update(bytes);
    }

    /// Hashes a presence flag, then the bytes themselves when non-null.
    pub fn addOptionalBytes(hh: *HashHelper, optional_bytes: ?[]const u8) void {
        hh.add(optional_bytes != null);
        hh.addBytes(optional_bytes orelse return);
    }

    /// Only the basename participates in the hash, not the emit directory.
    pub fn addEmitLoc(hh: *HashHelper, emit_loc: EmitLoc) void {
        hh.addBytes(emit_loc.basename);
    }

    pub fn addOptionalEmitLoc(hh: *HashHelper, optional_emit_loc: ?EmitLoc) void {
        hh.add(optional_emit_loc != null);
        hh.addEmitLoc(optional_emit_loc orelse return);
    }

    /// Hashes the list length followed by each element (each length-prefixed).
    pub fn addListOfBytes(hh: *HashHelper, list_of_bytes: []const []const u8) void {
        hh.add(list_of_bytes.len);
        for (list_of_bytes) |bytes| hh.addBytes(bytes);
    }

    /// Convert the input value into bytes and record it as a dependency of the process being cached.
    /// Supports versions, OS version ranges, and any bool/int/enum/array;
    /// other types are a compile error.
    pub fn add(hh: *HashHelper, x: anytype) void {
        switch (@TypeOf(x)) {
            std.builtin.Version => {
                hh.add(x.major);
                hh.add(x.minor);
                hh.add(x.patch);
            },
            std.Target.Os.TaggedVersionRange => {
                switch (x) {
                    .linux => |linux| {
                        hh.add(linux.range.min);
                        hh.add(linux.range.max);
                        hh.add(linux.glibc);
                    },
                    .windows => |windows| {
                        hh.add(windows.min);
                        hh.add(windows.max);
                    },
                    .semver => |semver| {
                        hh.add(semver.min);
                        hh.add(semver.max);
                    },
                    .none => {},
                }
            },
            else => switch (@typeInfo(@TypeOf(x))) {
                .Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
                else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
            },
        }
    }

    /// Hashes a presence flag, then the payload when non-null.
    pub fn addOptional(hh: *HashHelper, optional: anytype) void {
        hh.add(optional != null);
        hh.add(optional orelse return);
    }

    /// Returns a hex encoded hash of the inputs, without modifying state.
    pub fn peek(hh: HashHelper) [hex_digest_len]u8 {
        var copy = hh;
        return copy.final();
    }

    /// Returns the binary digest of the inputs, without modifying state.
    pub fn peekBin(hh: HashHelper) BinDigest {
        var copy = hh;
        var bin_digest: BinDigest = undefined;
        copy.hasher.final(&bin_digest);
        return bin_digest;
    }

    /// Returns a hex encoded hash of the inputs, mutating the state of the hasher.
    pub fn final(hh: *HashHelper) [hex_digest_len]u8 {
        var bin_digest: BinDigest = undefined;
        hh.hasher.final(&bin_digest);

        var out_digest: [hex_digest_len]u8 = undefined;
        _ = std.fmt.bufPrint(
            &out_digest,
            "{s}",
            .{std.fmt.fmtSliceHexLower(&bin_digest)},
        ) catch unreachable;
        return out_digest;
    }
};
-
/// Minimal handle that keeps the manifest file lock alive after the owning
/// `Manifest` has been deinitialized (see `Manifest.toOwnedLock`).
pub const Lock = struct {
    manifest_file: fs.File,

    /// Unlocks (where needed) and closes the manifest file, then poisons `lock`.
    pub fn release(lock: *Lock) void {
        const file = lock.manifest_file;
        lock.* = undefined;
        if (builtin.os.tag == .windows) {
            // Windows does not guarantee that locks are immediately unlocked when
            // the file handle is closed. See LockFileEx documentation.
            file.unlock();
        }
        file.close();
    }
};
-
pub const Manifest = struct {
    /// The parent cache this manifest transaction belongs to.
    cache: *Cache,
    /// Current state for incremental hashing.
    hash: HashHelper,
    /// Null until `hit` opens/creates the manifest file; `toOwnedLock` nulls it
    /// again when transferring ownership.
    manifest_file: ?fs.File,
    /// True when the on-disk manifest no longer matches `files` and must be
    /// rewritten by `writeManifest`.
    manifest_dirty: bool,
    /// Set this flag to true before calling hit() in order to indicate that
    /// upon a cache hit, the code using the cache will not modify the files
    /// within the cache directory. This allows multiple processes to utilize
    /// the same cache directory at the same time.
    want_shared_lock: bool = true,
    have_exclusive_lock: bool = false,
    // Indicate that we want isProblematicTimestamp to perform a filesystem write in
    // order to obtain a problematic timestamp for the next call. Calls after that
    // will then use the same timestamp, to avoid unnecessary filesystem writes.
    want_refresh_timestamp: bool = true,
    files: std.ArrayListUnmanaged(File) = .{},
    hex_digest: [hex_digest_len]u8,
    /// Populated when hit() returns an error because of one
    /// of the files listed in the manifest.
    failed_file_index: ?usize = null,
    /// Keeps track of the last time we performed a file system write to observe
    /// what time the file system thinks it is, according to its own granularity.
    recent_problematic_timestamp: i128 = 0,
-
    /// Add a file as a dependency of process being cached. When `hit` is
    /// called, the file's contents will be checked to ensure that it matches
    /// the contents from previous times.
    ///
    /// Max file size will be used to determine the amount of space the file contents
    /// are allowed to take up in memory. If max_file_size is null, then the contents
    /// will not be loaded into memory.
    ///
    /// Returns the index of the entry in the `files` array list. You can use it
    /// to access the contents of the file after calling `hit()` like so:
    ///
    /// ```
    /// var file_contents = cache_hash.files.items[file_index].contents.?;
    /// ```
    pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
        // Must be called before `hit` opens the manifest file.
        assert(self.manifest_file == null);

        const gpa = self.cache.gpa;
        // Reserve capacity up front so the append below cannot fail; the only
        // cleanup needed on an error path is freeing the duplicated sub_path.
        try self.files.ensureUnusedCapacity(gpa, 1);
        const prefixed_path = try self.cache.findPrefix(file_path);
        errdefer gpa.free(prefixed_path.sub_path);

        log.debug("Manifest.addFile {s} -> {d} {s}", .{
            file_path, prefixed_path.prefix, prefixed_path.sub_path,
        });

        self.files.addOneAssumeCapacity().* = .{
            .prefixed_path = prefixed_path,
            .contents = null,
            .max_file_size = max_file_size,
            .stat = undefined,
            .bin_digest = undefined,
        };

        // The prefix index and relative path are part of the cache key.
        self.hash.add(prefixed_path.prefix);
        self.hash.addBytes(prefixed_path.sub_path);

        return self.files.items.len - 1;
    }
-
- pub fn hashCSource(self: *Manifest, c_source: Compilation.CSourceFile) !void {
- _ = try self.addFile(c_source.src_path, null);
- // Hash the extra flags, with special care to call addFile for file parameters.
- // TODO this logic can likely be improved by utilizing clang_options_data.zig.
- const file_args = [_][]const u8{"-include"};
- var arg_i: usize = 0;
- while (arg_i < c_source.extra_flags.len) : (arg_i += 1) {
- const arg = c_source.extra_flags[arg_i];
- self.hash.addBytes(arg);
- for (file_args) |file_arg| {
- if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_source.extra_flags.len) {
- arg_i += 1;
- _ = try self.addFile(c_source.extra_flags[arg_i], null);
- }
- }
- }
- }
-
- pub fn addOptionalFile(self: *Manifest, optional_file_path: ?[]const u8) !void {
- self.hash.add(optional_file_path != null);
- const file_path = optional_file_path orelse return;
- _ = try self.addFile(file_path, null);
- }
-
- pub fn addListOfFiles(self: *Manifest, list_of_files: []const []const u8) !void {
- self.hash.add(list_of_files.len);
- for (list_of_files) |file_path| {
- _ = try self.addFile(file_path, null);
- }
- }
-
    /// Check the cache to see if the input exists in it. If it exists, returns `true`.
    /// A hex encoding of its hash is available by calling `final`.
    ///
    /// This function will also acquire an exclusive lock to the manifest file. This means
    /// that a process holding a Manifest will block any other process attempting to
    /// acquire the lock. If `want_shared_lock` is `true`, a cache hit guarantees the
    /// manifest file to be locked in shared mode, and a cache miss guarantees the manifest
    /// file to be locked in exclusive mode.
    ///
    /// The lock on the manifest file is released when `deinit` is called. As another
    /// option, one may call `toOwnedLock` to obtain a smaller object which can represent
    /// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called.
    pub fn hit(self: *Manifest) !bool {
        const gpa = self.cache.gpa;
        assert(self.manifest_file == null);

        self.failed_file_index = null;

        const ext = ".txt";
        var manifest_file_path: [self.hex_digest.len + ext.len]u8 = undefined;

        // Snapshot the digest of everything hashed so far; its hex form names
        // the manifest file on disk.
        var bin_digest: BinDigest = undefined;
        self.hash.hasher.final(&bin_digest);

        _ = std.fmt.bufPrint(
            &self.hex_digest,
            "{s}",
            .{std.fmt.fmtSliceHexLower(&bin_digest)},
        ) catch unreachable;

        // Re-seed the hasher with the initial digest; per-file digests are
        // folded in below so that `final` yields the combined hash.
        self.hash.hasher = hasher_init;
        self.hash.hasher.update(&bin_digest);

        mem.copy(u8, &manifest_file_path, &self.hex_digest);
        manifest_file_path[self.hex_digest.len..][0..ext.len].* = ext.*;

        if (self.files.items.len == 0) {
            // If there are no file inputs, we check if the manifest file exists instead of
            // comparing the hashes on the files used for the cached item
            while (true) {
                // When a shared lock is wanted, open non-blocking so we can fall
                // back to a shared lock instead of waiting on the exclusive one.
                if (self.cache.manifest_dir.openFile(&manifest_file_path, .{
                    .mode = .read_write,
                    .lock = .Exclusive,
                    .lock_nonblocking = self.want_shared_lock,
                })) |manifest_file| {
                    self.manifest_file = manifest_file;
                    self.have_exclusive_lock = true;
                    break;
                } else |open_err| switch (open_err) {
                    error.WouldBlock => {
                        self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
                            .lock = .Shared,
                        });
                        break;
                    },
                    error.FileNotFound => {
                        // Another process may create the file between our open
                        // and create; WouldBlock below retries the whole loop.
                        if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
                            .read = true,
                            .truncate = false,
                            .lock = .Exclusive,
                            .lock_nonblocking = self.want_shared_lock,
                        })) |manifest_file| {
                            self.manifest_file = manifest_file;
                            self.manifest_dirty = true;
                            self.have_exclusive_lock = true;
                            return false; // cache miss; exclusive lock already held
                        } else |err| switch (err) {
                            error.WouldBlock => continue,
                            else => |e| return e,
                        }
                    },
                    else => |e| return e,
                }
            }
        } else {
            if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
                .read = true,
                .truncate = false,
                .lock = .Exclusive,
                .lock_nonblocking = self.want_shared_lock,
            })) |manifest_file| {
                self.manifest_file = manifest_file;
                self.have_exclusive_lock = true;
            } else |err| switch (err) {
                error.WouldBlock => {
                    self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
                        .lock = .Shared,
                    });
                },
                else => |e| return e,
            }
        }

        self.want_refresh_timestamp = true;

        const file_contents = try self.manifest_file.?.reader().readAllAlloc(gpa, manifest_file_size_max);
        defer gpa.free(file_contents);

        const input_file_count = self.files.items.len;
        var any_file_changed = false;
        var line_iter = mem.tokenize(u8, file_contents, "\n");
        var idx: usize = 0;
        while (line_iter.next()) |line| {
            defer idx += 1;

            // Manifest lines beyond the declared inputs are files discovered in
            // a previous run (e.g. via addFilePost); append fresh entries for them.
            const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
                const new = try self.files.addOne(gpa);
                new.* = .{
                    .prefixed_path = null,
                    .contents = null,
                    .max_file_size = null,
                    .stat = undefined,
                    .bin_digest = undefined,
                };
                break :blk new;
            };

            // Line format: "<size> <inode> <mtime_nsec> <hex digest> <prefix> <sub_path>"
            var iter = mem.tokenize(u8, line, " ");
            const size = iter.next() orelse return error.InvalidFormat;
            const inode = iter.next() orelse return error.InvalidFormat;
            const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
            const digest_str = iter.next() orelse return error.InvalidFormat;
            const prefix_str = iter.next() orelse return error.InvalidFormat;
            const file_path = iter.rest();

            cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
            cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
            // NOTE(review): `stat.mtime` is i128 and writeManifest prints the full
            // value, but it is parsed back as i64 here; out-of-i64-range values
            // would surface as error.InvalidFormat — confirm this is intended.
            cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
            _ = std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
            const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat;
            if (prefix >= self.cache.prefixes_len) return error.InvalidFormat;

            if (file_path.len == 0) {
                return error.InvalidFormat;
            }
            // A pre-declared input must match the manifest line exactly.
            if (cache_hash_file.prefixed_path) |pp| {
                if (pp.prefix != prefix or !mem.eql(u8, file_path, pp.sub_path)) {
                    return error.InvalidFormat;
                }
            }

            if (cache_hash_file.prefixed_path == null) {
                cache_hash_file.prefixed_path = .{
                    .prefix = prefix,
                    .sub_path = try gpa.dupe(u8, file_path),
                };
            }

            const pp = cache_hash_file.prefixed_path.?;
            const dir = self.cache.prefixes()[pp.prefix].handle;
            const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) {
                error.FileNotFound => {
                    // A missing input is a plain cache miss, not an error.
                    try self.upgradeToExclusiveLock();
                    return false;
                },
                else => return error.CacheUnavailable,
            };
            defer this_file.close();

            const actual_stat = this_file.stat() catch |err| {
                self.failed_file_index = idx;
                return err;
            };
            const size_match = actual_stat.size == cache_hash_file.stat.size;
            const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
            const inode_match = actual_stat.inode == cache_hash_file.stat.inode;

            if (!size_match or !mtime_match or !inode_match) {
                self.manifest_dirty = true;

                cache_hash_file.stat = .{
                    .size = actual_stat.size,
                    .mtime = actual_stat.mtime,
                    .inode = actual_stat.inode,
                };

                if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) {
                    // The actual file has an unreliable timestamp, force it to be hashed
                    cache_hash_file.stat.mtime = 0;
                    cache_hash_file.stat.inode = 0;
                }

                // Stat changed: fall back to comparing content digests.
                var actual_digest: BinDigest = undefined;
                hashFile(this_file, &actual_digest) catch |err| {
                    self.failed_file_index = idx;
                    return err;
                };

                if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
                    cache_hash_file.bin_digest = actual_digest;
                    // keep going until we have the input file digests
                    any_file_changed = true;
                }
            }

            if (!any_file_changed) {
                self.hash.hasher.update(&cache_hash_file.bin_digest);
            }
        }

        if (any_file_changed) {
            // cache miss
            // keep the manifest file open
            self.unhit(bin_digest, input_file_count);
            try self.upgradeToExclusiveLock();
            return false;
        }

        if (idx < input_file_count) {
            // The manifest had fewer lines than declared inputs: hash the rest
            // directly from disk, then report a miss.
            self.manifest_dirty = true;
            while (idx < input_file_count) : (idx += 1) {
                const ch_file = &self.files.items[idx];
                self.populateFileHash(ch_file) catch |err| {
                    self.failed_file_index = idx;
                    return err;
                };
            }
            try self.upgradeToExclusiveLock();
            return false;
        }

        if (self.want_shared_lock) {
            try self.downgradeToSharedLock();
        }

        return true;
    }
-
- pub fn unhit(self: *Manifest, bin_digest: BinDigest, input_file_count: usize) void {
- // Reset the hash.
- self.hash.hasher = hasher_init;
- self.hash.hasher.update(&bin_digest);
-
- // Remove files not in the initial hash.
- for (self.files.items[input_file_count..]) |*file| {
- file.deinit(self.cache.gpa);
- }
- self.files.shrinkRetainingCapacity(input_file_count);
-
- for (self.files.items) |file| {
- self.hash.hasher.update(&file.bin_digest);
- }
- }
-
    /// Returns true when `file_time` is too close to (or after) the file
    /// system's notion of "now", meaning a subsequent write could reuse the
    /// same timestamp and the mtime cannot be trusted for change detection.
    /// Checks a per-Manifest cached threshold first, then the Cache-wide one,
    /// and only touches the filesystem once per hit() (see want_refresh_timestamp).
    fn isProblematicTimestamp(man: *Manifest, file_time: i128) bool {
        // If the file_time is prior to the most recent problematic timestamp
        // then we don't need to access the filesystem.
        if (file_time < man.recent_problematic_timestamp)
            return false;

        // Next we will check the globally shared Cache timestamp, which is accessed
        // from multiple threads.
        man.cache.mutex.lock();
        defer man.cache.mutex.unlock();

        // Save the global one to our local one to avoid locking next time.
        man.recent_problematic_timestamp = man.cache.recent_problematic_timestamp;
        if (file_time < man.recent_problematic_timestamp)
            return false;

        // This flag prevents multiple filesystem writes for the same hit() call.
        if (man.want_refresh_timestamp) {
            man.want_refresh_timestamp = false;

            // On any filesystem error, conservatively report the timestamp as
            // problematic (forces content hashing instead of trusting mtime).
            var file = man.cache.manifest_dir.createFile("timestamp", .{
                .read = true,
                .truncate = true,
            }) catch return true;
            defer file.close();

            // Save locally and also save globally (we still hold the global lock).
            man.recent_problematic_timestamp = (file.stat() catch return true).mtime;
            man.cache.recent_problematic_timestamp = man.recent_problematic_timestamp;
        }

        return file_time >= man.recent_problematic_timestamp;
    }
-
    /// Open the file behind `ch_file`, record its stat, hash its contents, and
    /// fold the digest into the manifest hash. When `max_file_size` is set, the
    /// contents are also kept in memory (error.FileTooBig if over the limit).
    fn populateFileHash(self: *Manifest, ch_file: *File) !void {
        const pp = ch_file.prefixed_path.?;
        const dir = self.cache.prefixes()[pp.prefix].handle;
        const file = try dir.openFile(pp.sub_path, .{});
        defer file.close();

        const actual_stat = try file.stat();
        ch_file.stat = .{
            .size = actual_stat.size,
            .mtime = actual_stat.mtime,
            .inode = actual_stat.inode,
        };

        if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
            // The actual file has an unreliable timestamp, force it to be hashed
            ch_file.stat.mtime = 0;
            ch_file.stat.inode = 0;
        }

        if (ch_file.max_file_size) |max_file_size| {
            if (ch_file.stat.size > max_file_size) {
                return error.FileTooBig;
            }

            const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size));
            errdefer self.cache.gpa.free(contents);

            // Hash while reading from disk, to keep the contents in the cpu cache while
            // doing hashing.
            var hasher = hasher_init;
            var off: usize = 0;
            while (true) {
                // give me everything you've got, captain
                const bytes_read = try file.read(contents[off..]);
                if (bytes_read == 0) break;
                hasher.update(contents[off..][0..bytes_read]);
                off += bytes_read;
            }
            hasher.final(&ch_file.bin_digest);

            ch_file.contents = contents;
        } else {
            try hashFile(file, &ch_file.bin_digest);
        }

        self.hash.hasher.update(&ch_file.bin_digest);
    }
-
- /// Add a file as a dependency of process being cached, after the initial hash has been
- /// calculated. This is useful for processes that don't know all the files that
- /// are depended on ahead of time. For example, a source file that can import other files
- /// will need to be recompiled if the imported file is changed.
- pub fn addFilePostFetch(self: *Manifest, file_path: []const u8, max_file_size: usize) ![]const u8 {
- assert(self.manifest_file != null);
-
- const gpa = self.cache.gpa;
- const prefixed_path = try self.cache.findPrefix(file_path);
- errdefer gpa.free(prefixed_path.sub_path);
-
- log.debug("Manifest.addFilePostFetch {s} -> {d} {s}", .{
- file_path, prefixed_path.prefix, prefixed_path.sub_path,
- });
-
- const new_ch_file = try self.files.addOne(gpa);
- new_ch_file.* = .{
- .prefixed_path = prefixed_path,
- .max_file_size = max_file_size,
- .stat = undefined,
- .bin_digest = undefined,
- .contents = null,
- };
- errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
-
- try self.populateFileHash(new_ch_file);
-
- return new_ch_file.contents.?;
- }
-
- /// Add a file as a dependency of process being cached, after the initial hash has been
- /// calculated. This is useful for processes that don't know the all the files that
- /// are depended on ahead of time. For example, a source file that can import other files
- /// will need to be recompiled if the imported file is changed.
- pub fn addFilePost(self: *Manifest, file_path: []const u8) !void {
- assert(self.manifest_file != null);
-
- const gpa = self.cache.gpa;
- const prefixed_path = try self.cache.findPrefix(file_path);
- errdefer gpa.free(prefixed_path.sub_path);
-
- log.debug("Manifest.addFilePost {s} -> {d} {s}", .{
- file_path, prefixed_path.prefix, prefixed_path.sub_path,
- });
-
- const new_ch_file = try self.files.addOne(gpa);
- new_ch_file.* = .{
- .prefixed_path = prefixed_path,
- .max_file_size = null,
- .stat = undefined,
- .bin_digest = undefined,
- .contents = null,
- };
- errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
-
- try self.populateFileHash(new_ch_file);
- }
-
    /// Like `addFilePost` but when the file contents have already been loaded from disk.
    /// On success, cache takes ownership of `resolved_path`.
    /// `bytes` are hashed but not retained; `stat` is recorded as given (with
    /// unreliable mtimes zeroed as elsewhere).
    pub fn addFilePostContents(
        self: *Manifest,
        resolved_path: []u8,
        bytes: []const u8,
        stat: File.Stat,
    ) error{OutOfMemory}!void {
        assert(self.manifest_file != null);
        const gpa = self.cache.gpa;

        const ch_file = try self.files.addOne(gpa);
        errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);

        log.debug("Manifest.addFilePostContents resolved_path={s}", .{resolved_path});

        // Takes ownership of resolved_path on success; on failure the caller
        // still owns it (errdefer only frees the duplicated sub_path).
        const prefixed_path = try self.cache.findPrefixResolved(resolved_path);
        errdefer gpa.free(prefixed_path.sub_path);

        log.debug("Manifest.addFilePostContents -> {d} {s}", .{
            prefixed_path.prefix, prefixed_path.sub_path,
        });

        ch_file.* = .{
            .prefixed_path = prefixed_path,
            .max_file_size = null,
            .stat = stat,
            .bin_digest = undefined,
            .contents = null,
        };

        if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
            // The actual file has an unreliable timestamp, force it to be hashed
            ch_file.stat.mtime = 0;
            ch_file.stat.inode = 0;
        }

        {
            var hasher = hasher_init;
            hasher.update(bytes);
            hasher.final(&ch_file.bin_digest);
        }

        self.hash.hasher.update(&ch_file.bin_digest);
    }
-
- pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
- assert(self.manifest_file != null);
-
- const dep_file_contents = try dir.readFileAlloc(self.cache.gpa, dep_file_basename, manifest_file_size_max);
- defer self.cache.gpa.free(dep_file_contents);
-
- var error_buf = std.ArrayList(u8).init(self.cache.gpa);
- defer error_buf.deinit();
-
- var it: @import("DepTokenizer.zig") = .{ .bytes = dep_file_contents };
-
- // Skip first token: target.
- switch (it.next() orelse return) { // Empty dep file OK.
- .target, .target_must_resolve, .prereq => {},
- else => |err| {
- try err.printError(error_buf.writer());
- log.err("failed parsing {s}: {s}", .{ dep_file_basename, error_buf.items });
- return error.InvalidDepFile;
- },
- }
- // Process 0+ preqreqs.
- // Clang is invoked in single-source mode so we never get more targets.
- while (true) {
- switch (it.next() orelse return) {
- .target, .target_must_resolve => return,
- .prereq => |file_path| try self.addFilePost(file_path),
- else => |err| {
- try err.printError(error_buf.writer());
- log.err("failed parsing {s}: {s}", .{ dep_file_basename, error_buf.items });
- return error.InvalidDepFile;
- },
- }
- }
- }
-
- /// Returns a hex encoded hash of the inputs.
- pub fn final(self: *Manifest) [hex_digest_len]u8 {
- assert(self.manifest_file != null);
-
- // We don't close the manifest file yet, because we want to
- // keep it locked until the API user is done using it.
- // We also don't write out the manifest yet, because until
- // cache_release is called we still might be working on creating
- // the artifacts to cache.
-
- var bin_digest: BinDigest = undefined;
- self.hash.hasher.final(&bin_digest);
-
- var out_digest: [hex_digest_len]u8 = undefined;
- _ = std.fmt.bufPrint(
- &out_digest,
- "{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
- ) catch unreachable;
-
- return out_digest;
- }
-
    /// If `want_shared_lock` is true, this function automatically downgrades the
    /// lock from exclusive to shared.
    /// Flushes the in-memory file list to the manifest file when dirty; each
    /// line is "<size> <inode> <mtime> <hex digest> <prefix> <sub_path>".
    pub fn writeManifest(self: *Manifest) !void {
        // Writing requires the exclusive lock acquired on a cache miss.
        assert(self.have_exclusive_lock);

        const manifest_file = self.manifest_file.?;
        if (self.manifest_dirty) {
            self.manifest_dirty = false;

            var contents = std.ArrayList(u8).init(self.cache.gpa);
            defer contents.deinit();

            const writer = contents.writer();
            var encoded_digest: [hex_digest_len]u8 = undefined;

            for (self.files.items) |file| {
                _ = std.fmt.bufPrint(
                    &encoded_digest,
                    "{s}",
                    .{std.fmt.fmtSliceHexLower(&file.bin_digest)},
                ) catch unreachable;
                try writer.print("{d} {d} {d} {s} {d} {s}\n", .{
                    file.stat.size,
                    file.stat.inode,
                    file.stat.mtime,
                    &encoded_digest,
                    file.prefixed_path.?.prefix,
                    file.prefixed_path.?.sub_path,
                });
            }

            // Truncate to the new length first, then overwrite from offset 0,
            // so a previously longer manifest leaves no stale tail.
            try manifest_file.setEndPos(contents.items.len);
            try manifest_file.pwriteAll(contents.items, 0);
        }

        if (self.want_shared_lock) {
            try self.downgradeToSharedLock();
        }
    }
-
- fn downgradeToSharedLock(self: *Manifest) !void {
- if (!self.have_exclusive_lock) return;
-
- // WASI does not currently support flock, so we bypass it here.
- // TODO: If/when flock is supported on WASI, this check should be removed.
- // See https://github.com/WebAssembly/wasi-filesystem/issues/2
- if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) {
- const manifest_file = self.manifest_file.?;
- try manifest_file.downgradeLock();
- }
-
- self.have_exclusive_lock = false;
- }
-
- fn upgradeToExclusiveLock(self: *Manifest) !void {
- if (self.have_exclusive_lock) return;
- assert(self.manifest_file != null);
-
- // WASI does not currently support flock, so we bypass it here.
- // TODO: If/when flock is supported on WASI, this check should be removed.
- // See https://github.com/WebAssembly/wasi-filesystem/issues/2
- if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) {
- const manifest_file = self.manifest_file.?;
- // Here we intentionally have a period where the lock is released, in case there are
- // other processes holding a shared lock.
- manifest_file.unlock();
- try manifest_file.lock(.Exclusive);
- }
- self.have_exclusive_lock = true;
- }
-
- /// Obtain only the data needed to maintain a lock on the manifest file.
- /// The `Manifest` remains safe to deinit.
- /// Don't forget to call `writeManifest` before this!
- pub fn toOwnedLock(self: *Manifest) Lock {
- const lock: Lock = .{
- .manifest_file = self.manifest_file.?,
- };
-
- self.manifest_file = null;
- return lock;
- }
-
- /// Releases the manifest file and frees any memory the Manifest was using.
- /// `Manifest.hit` must be called first.
- /// Don't forget to call `writeManifest` before this!
- pub fn deinit(self: *Manifest) void {
- if (self.manifest_file) |file| {
- if (builtin.os.tag == .windows) {
- // See Lock.release for why this is required on Windows
- file.unlock();
- }
-
- file.close();
- }
- for (self.files.items) |*file| {
- file.deinit(self.cache.gpa);
- }
- self.files.deinit(self.cache.gpa);
- }
-};
-
/// On operating systems that support symlinks, does a readlink. On other operating systems,
/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
/// it is treated as not supporting symlinks.
pub fn readSmallFile(dir: fs.Dir, sub_path: []const u8, buffer: []u8) ![]u8 {
    return switch (builtin.os.tag) {
        .windows => dir.readFile(sub_path, buffer),
        else => dir.readLink(sub_path, buffer),
    };
}
-
/// On operating systems that support symlinks, does a symlink. On other operating systems,
/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
/// it is treated as not supporting symlinks.
/// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer.
pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void {
    assert(data.len <= 255);
    return switch (builtin.os.tag) {
        .windows => dir.writeFile(sub_path, data),
        else => dir.symLink(data, sub_path, .{}),
    };
}
-
/// Hash the entire contents of `file` (from its current position) into
/// `bin_digest`, streaming through a small stack buffer.
fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) !void {
    var hasher = hasher_init;
    var buf: [1024]u8 = undefined;

    while (true) {
        const n = try file.read(&buf);
        if (n == 0) break;
        hasher.update(buf[0..n]);
    }

    hasher.final(bin_digest);
}
-
-// Create/Write a file, close it, then grab its stat.mtime timestamp.
-fn testGetCurrentFileTimestamp() !i128 {
- var file = try fs.cwd().createFile("test-filetimestamp.tmp", .{
- .read = true,
- .truncate = true,
- });
- defer file.close();
-
- return (try file.stat()).mtime;
-}
-
-test "cache file and then recall it" {
- if (builtin.os.tag == .wasi) {
- // https://github.com/ziglang/zig/issues/5437
- return error.SkipZigTest;
- }
-
- const cwd = fs.cwd();
-
- const temp_file = "test.txt";
- const temp_manifest_dir = "temp_manifest_dir";
-
- try cwd.writeFile(temp_file, "Hello, world!\n");
-
- // Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
- std.time.sleep(1);
- }
-
- var digest1: [hex_digest_len]u8 = undefined;
- var digest2: [hex_digest_len]u8 = undefined;
-
- {
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- defer cache.manifest_dir.close();
-
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.add(true);
- ch.hash.add(@as(u16, 1234));
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file, null);
-
- // There should be nothing in the cache
- try testing.expectEqual(false, try ch.hit());
-
- digest1 = ch.final();
- try ch.writeManifest();
- }
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.add(true);
- ch.hash.add(@as(u16, 1234));
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file, null);
-
- // Cache hit! We just "built" the same file
- try testing.expect(try ch.hit());
- digest2 = ch.final();
-
- try testing.expectEqual(false, ch.have_exclusive_lock);
- }
-
- try testing.expectEqual(digest1, digest2);
- }
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file);
-}
-
-test "check that changing a file makes cache fail" {
- if (builtin.os.tag == .wasi) {
- // https://github.com/ziglang/zig/issues/5437
- return error.SkipZigTest;
- }
- const cwd = fs.cwd();
-
- const temp_file = "cache_hash_change_file_test.txt";
- const temp_manifest_dir = "cache_hash_change_file_manifest_dir";
- const original_temp_file_contents = "Hello, world!\n";
- const updated_temp_file_contents = "Hello, world; but updated!\n";
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
-
- try cwd.writeFile(temp_file, original_temp_file_contents);
-
- // Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
- std.time.sleep(1);
- }
-
- var digest1: [hex_digest_len]u8 = undefined;
- var digest2: [hex_digest_len]u8 = undefined;
-
- {
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- defer cache.manifest_dir.close();
-
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.addBytes("1234");
- const temp_file_idx = try ch.addFile(temp_file, 100);
-
- // There should be nothing in the cache
- try testing.expectEqual(false, try ch.hit());
-
- try testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
-
- digest1 = ch.final();
-
- try ch.writeManifest();
- }
-
- try cwd.writeFile(temp_file, updated_temp_file_contents);
-
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.addBytes("1234");
- const temp_file_idx = try ch.addFile(temp_file, 100);
-
- // A file that we depend on has been updated, so the cache should not contain an entry for it
- try testing.expectEqual(false, try ch.hit());
-
- // The cache system does not keep the contents of re-hashed input files.
- try testing.expect(ch.files.items[temp_file_idx].contents == null);
-
- digest2 = ch.final();
-
- try ch.writeManifest();
- }
-
- try testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
- }
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteTree(temp_file);
-}
-
-test "no file inputs" {
- if (builtin.os.tag == .wasi) {
- // https://github.com/ziglang/zig/issues/5437
- return error.SkipZigTest;
- }
- const cwd = fs.cwd();
- const temp_manifest_dir = "no_file_inputs_manifest_dir";
- defer cwd.deleteTree(temp_manifest_dir) catch {};
-
- var digest1: [hex_digest_len]u8 = undefined;
- var digest2: [hex_digest_len]u8 = undefined;
-
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- defer cache.manifest_dir.close();
-
- {
- var man = cache.obtain();
- defer man.deinit();
-
- man.hash.addBytes("1234");
-
- // There should be nothing in the cache
- try testing.expectEqual(false, try man.hit());
-
- digest1 = man.final();
-
- try man.writeManifest();
- }
- {
- var man = cache.obtain();
- defer man.deinit();
-
- man.hash.addBytes("1234");
-
- try testing.expect(try man.hit());
- digest2 = man.final();
- try testing.expectEqual(false, man.have_exclusive_lock);
- }
-
- try testing.expectEqual(digest1, digest2);
-}
-
-test "Manifest with files added after initial hash work" {
- if (builtin.os.tag == .wasi) {
- // https://github.com/ziglang/zig/issues/5437
- return error.SkipZigTest;
- }
- const cwd = fs.cwd();
-
- const temp_file1 = "cache_hash_post_file_test1.txt";
- const temp_file2 = "cache_hash_post_file_test2.txt";
- const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
-
- try cwd.writeFile(temp_file1, "Hello, world!\n");
- try cwd.writeFile(temp_file2, "Hello world the second!\n");
-
- // Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time) {
- std.time.sleep(1);
- }
-
- var digest1: [hex_digest_len]u8 = undefined;
- var digest2: [hex_digest_len]u8 = undefined;
- var digest3: [hex_digest_len]u8 = undefined;
-
- {
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- defer cache.manifest_dir.close();
-
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
-
- // There should be nothing in the cache
- try testing.expectEqual(false, try ch.hit());
-
- _ = try ch.addFilePost(temp_file2);
-
- digest1 = ch.final();
- try ch.writeManifest();
- }
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
-
- try testing.expect(try ch.hit());
- digest2 = ch.final();
-
- try testing.expectEqual(false, ch.have_exclusive_lock);
- }
- try testing.expect(mem.eql(u8, &digest1, &digest2));
-
- // Modify the file added after initial hash
- try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
-
- // Wait for file timestamps to tick
- const initial_time2 = try testGetCurrentFileTimestamp();
- while ((try testGetCurrentFileTimestamp()) == initial_time2) {
- std.time.sleep(1);
- }
-
- {
- var ch = cache.obtain();
- defer ch.deinit();
-
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
-
- // A file that we depend on has been updated, so the cache should not contain an entry for it
- try testing.expectEqual(false, try ch.hit());
-
- _ = try ch.addFilePost(temp_file2);
-
- digest3 = ch.final();
-
- try ch.writeManifest();
- }
-
- try testing.expect(!mem.eql(u8, &digest1, &digest3));
- }
-
- try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file1);
- try cwd.deleteFile(temp_file2);
-}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 18d0e46892..ea83d82109 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -26,7 +26,7 @@ const wasi_libc = @import("wasi_libc.zig");
const fatal = @import("main.zig").fatal;
const clangMain = @import("main.zig").clangMain;
const Module = @import("Module.zig");
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const translate_c = @import("translate_c.zig");
const clang = @import("clang.zig");
const c_codegen = @import("codegen/c.zig");
@@ -807,44 +807,7 @@ pub const AllErrors = struct {
}
};
-pub const Directory = struct {
- /// This field is redundant for operations that can act on the open directory handle
- /// directly, but it is needed when passing the directory to a child process.
- /// `null` means cwd.
- path: ?[]const u8,
- handle: std.fs.Dir,
-
- pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
- if (self.path) |p| {
- // TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
- defer allocator.free(part2);
- return std.fs.path.join(allocator, &[_][]const u8{ p, part2 });
- } else {
- return std.fs.path.join(allocator, paths);
- }
- }
-
- pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
- if (self.path) |p| {
- // TODO clean way to do this with only 1 allocation
- const part2 = try std.fs.path.join(allocator, paths);
- defer allocator.free(part2);
- return std.fs.path.joinZ(allocator, &[_][]const u8{ p, part2 });
- } else {
- return std.fs.path.joinZ(allocator, paths);
- }
- }
-
- /// Whether or not the handle should be closed, or the path should be freed
- /// is determined by usage, however this function is provided for convenience
- /// if it happens to be what the caller needs.
- pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
- self.handle.close();
- if (self.path) |p| gpa.free(p);
- self.* = undefined;
- }
-};
+pub const Directory = Cache.Directory;
pub const EmitLoc = struct {
/// If this is `null` it means the file will be output to the cache directory.
@@ -854,6 +817,35 @@ pub const EmitLoc = struct {
basename: []const u8,
};
+pub const cache_helpers = struct {
+ pub fn addEmitLoc(hh: *Cache.HashHelper, emit_loc: EmitLoc) void {
+ hh.addBytes(emit_loc.basename);
+ }
+
+ pub fn addOptionalEmitLoc(hh: *Cache.HashHelper, optional_emit_loc: ?EmitLoc) void {
+ hh.add(optional_emit_loc != null);
+ addEmitLoc(hh, optional_emit_loc orelse return);
+ }
+
+ pub fn hashCSource(self: *Cache.Manifest, c_source: Compilation.CSourceFile) !void {
+ _ = try self.addFile(c_source.src_path, null);
+ // Hash the extra flags, with special care to call addFile for file parameters.
+ // TODO this logic can likely be improved by utilizing clang_options_data.zig.
+ const file_args = [_][]const u8{"-include"};
+ var arg_i: usize = 0;
+ while (arg_i < c_source.extra_flags.len) : (arg_i += 1) {
+ const arg = c_source.extra_flags[arg_i];
+ self.hash.addBytes(arg);
+ for (file_args) |file_arg| {
+ if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_source.extra_flags.len) {
+ arg_i += 1;
+ _ = try self.addFile(c_source.extra_flags[arg_i], null);
+ }
+ }
+ }
+ }
+};
+
pub const ClangPreprocessorMode = enum {
no,
/// This means we are doing `zig cc -E -o <path>`.
@@ -1522,8 +1514,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
cache.hash.add(link_libunwind);
cache.hash.add(options.output_mode);
cache.hash.add(options.machine_code_model);
- cache.hash.addOptionalEmitLoc(options.emit_bin);
- cache.hash.addOptionalEmitLoc(options.emit_implib);
+ cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_bin);
+ cache_helpers.addOptionalEmitLoc(&cache.hash, options.emit_implib);
cache.hash.addBytes(options.root_name);
if (options.target.os.tag == .wasi) cache.hash.add(wasi_exec_model);
// TODO audit this and make sure everything is in it
@@ -2636,11 +2628,11 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
man.hash.addListOfBytes(key.src.extra_flags);
}
- man.hash.addOptionalEmitLoc(comp.emit_asm);
- man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
- man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
- man.hash.addOptionalEmitLoc(comp.emit_analysis);
- man.hash.addOptionalEmitLoc(comp.emit_docs);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_asm);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_bc);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_analysis);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_docs);
man.hash.addListOfBytes(comp.clang_argv);
@@ -3959,11 +3951,11 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
defer man.deinit();
man.hash.add(comp.clang_preprocessor_mode);
- man.hash.addOptionalEmitLoc(comp.emit_asm);
- man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
- man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_asm);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_ir);
+ cache_helpers.addOptionalEmitLoc(&man.hash, comp.emit_llvm_bc);
- try man.hashCSource(c_object.src);
+ try cache_helpers.hashCSource(&man, c_object.src);
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig
deleted file mode 100644
index 8f9f2f81cd..0000000000
--- a/src/DepTokenizer.zig
+++ /dev/null
@@ -1,1069 +0,0 @@
-const Tokenizer = @This();
-
-index: usize = 0,
-bytes: []const u8,
-state: State = .lhs,
-
-const std = @import("std");
-const testing = std.testing;
-const assert = std.debug.assert;
-
-pub fn next(self: *Tokenizer) ?Token {
- var start = self.index;
- var must_resolve = false;
- while (self.index < self.bytes.len) {
- const char = self.bytes[self.index];
- switch (self.state) {
- .lhs => switch (char) {
- '\t', '\n', '\r', ' ' => {
- // silently ignore whitespace
- self.index += 1;
- },
- else => {
- start = self.index;
- self.state = .target;
- },
- },
- .target => switch (char) {
- '\t', '\n', '\r', ' ' => {
- return errorIllegalChar(.invalid_target, self.index, char);
- },
- '$' => {
- self.state = .target_dollar_sign;
- self.index += 1;
- },
- '\\' => {
- self.state = .target_reverse_solidus;
- self.index += 1;
- },
- ':' => {
- self.state = .target_colon;
- self.index += 1;
- },
- else => {
- self.index += 1;
- },
- },
- .target_reverse_solidus => switch (char) {
- '\t', '\n', '\r' => {
- return errorIllegalChar(.bad_target_escape, self.index, char);
- },
- ' ', '#', '\\' => {
- must_resolve = true;
- self.state = .target;
- self.index += 1;
- },
- '$' => {
- self.state = .target_dollar_sign;
- self.index += 1;
- },
- else => {
- self.state = .target;
- self.index += 1;
- },
- },
- .target_dollar_sign => switch (char) {
- '$' => {
- must_resolve = true;
- self.state = .target;
- self.index += 1;
- },
- else => {
- return errorIllegalChar(.expected_dollar_sign, self.index, char);
- },
- },
- .target_colon => switch (char) {
- '\n', '\r' => {
- const bytes = self.bytes[start .. self.index - 1];
- if (bytes.len != 0) {
- self.state = .lhs;
- return finishTarget(must_resolve, bytes);
- }
- // silently ignore null target
- self.state = .lhs;
- },
- '/', '\\' => {
- self.state = .target_colon_reverse_solidus;
- self.index += 1;
- },
- else => {
- const bytes = self.bytes[start .. self.index - 1];
- if (bytes.len != 0) {
- self.state = .rhs;
- return finishTarget(must_resolve, bytes);
- }
- // silently ignore null target
- self.state = .lhs;
- },
- },
- .target_colon_reverse_solidus => switch (char) {
- '\n', '\r' => {
- const bytes = self.bytes[start .. self.index - 2];
- if (bytes.len != 0) {
- self.state = .lhs;
- return finishTarget(must_resolve, bytes);
- }
- // silently ignore null target
- self.state = .lhs;
- },
- else => {
- self.state = .target;
- },
- },
- .rhs => switch (char) {
- '\t', ' ' => {
- // silently ignore horizontal whitespace
- self.index += 1;
- },
- '\n', '\r' => {
- self.state = .lhs;
- },
- '\\' => {
- self.state = .rhs_continuation;
- self.index += 1;
- },
- '"' => {
- self.state = .prereq_quote;
- self.index += 1;
- start = self.index;
- },
- else => {
- start = self.index;
- self.state = .prereq;
- },
- },
- .rhs_continuation => switch (char) {
- '\n' => {
- self.state = .rhs;
- self.index += 1;
- },
- '\r' => {
- self.state = .rhs_continuation_linefeed;
- self.index += 1;
- },
- else => {
- return errorIllegalChar(.continuation_eol, self.index, char);
- },
- },
- .rhs_continuation_linefeed => switch (char) {
- '\n' => {
- self.state = .rhs;
- self.index += 1;
- },
- else => {
- return errorIllegalChar(.continuation_eol, self.index, char);
- },
- },
- .prereq_quote => switch (char) {
- '"' => {
- self.index += 1;
- self.state = .rhs;
- return Token{ .prereq = self.bytes[start .. self.index - 1] };
- },
- else => {
- self.index += 1;
- },
- },
- .prereq => switch (char) {
- '\t', ' ' => {
- self.state = .rhs;
- return Token{ .prereq = self.bytes[start..self.index] };
- },
- '\n', '\r' => {
- self.state = .lhs;
- return Token{ .prereq = self.bytes[start..self.index] };
- },
- '\\' => {
- self.state = .prereq_continuation;
- self.index += 1;
- },
- else => {
- self.index += 1;
- },
- },
- .prereq_continuation => switch (char) {
- '\n' => {
- self.index += 1;
- self.state = .rhs;
- return Token{ .prereq = self.bytes[start .. self.index - 2] };
- },
- '\r' => {
- self.state = .prereq_continuation_linefeed;
- self.index += 1;
- },
- else => {
- // not continuation
- self.state = .prereq;
- self.index += 1;
- },
- },
- .prereq_continuation_linefeed => switch (char) {
- '\n' => {
- self.index += 1;
- self.state = .rhs;
- return Token{ .prereq = self.bytes[start .. self.index - 1] };
- },
- else => {
- return errorIllegalChar(.continuation_eol, self.index, char);
- },
- },
- }
- } else {
- switch (self.state) {
- .lhs,
- .rhs,
- .rhs_continuation,
- .rhs_continuation_linefeed,
- => return null,
- .target => {
- return errorPosition(.incomplete_target, start, self.bytes[start..]);
- },
- .target_reverse_solidus,
- .target_dollar_sign,
- => {
- const idx = self.index - 1;
- return errorIllegalChar(.incomplete_escape, idx, self.bytes[idx]);
- },
- .target_colon => {
- const bytes = self.bytes[start .. self.index - 1];
- if (bytes.len != 0) {
- self.index += 1;
- self.state = .rhs;
- return finishTarget(must_resolve, bytes);
- }
- // silently ignore null target
- self.state = .lhs;
- return null;
- },
- .target_colon_reverse_solidus => {
- const bytes = self.bytes[start .. self.index - 2];
- if (bytes.len != 0) {
- self.index += 1;
- self.state = .rhs;
- return finishTarget(must_resolve, bytes);
- }
- // silently ignore null target
- self.state = .lhs;
- return null;
- },
- .prereq_quote => {
- return errorPosition(.incomplete_quoted_prerequisite, start, self.bytes[start..]);
- },
- .prereq => {
- self.state = .lhs;
- return Token{ .prereq = self.bytes[start..] };
- },
- .prereq_continuation => {
- self.state = .lhs;
- return Token{ .prereq = self.bytes[start .. self.index - 1] };
- },
- .prereq_continuation_linefeed => {
- self.state = .lhs;
- return Token{ .prereq = self.bytes[start .. self.index - 2] };
- },
- }
- }
- unreachable;
-}
-
-fn errorPosition(comptime id: std.meta.Tag(Token), index: usize, bytes: []const u8) Token {
- return @unionInit(Token, @tagName(id), .{ .index = index, .bytes = bytes });
-}
-
-fn errorIllegalChar(comptime id: std.meta.Tag(Token), index: usize, char: u8) Token {
- return @unionInit(Token, @tagName(id), .{ .index = index, .char = char });
-}
-
-fn finishTarget(must_resolve: bool, bytes: []const u8) Token {
- return if (must_resolve) .{ .target_must_resolve = bytes } else .{ .target = bytes };
-}
-
-const State = enum {
- lhs,
- target,
- target_reverse_solidus,
- target_dollar_sign,
- target_colon,
- target_colon_reverse_solidus,
- rhs,
- rhs_continuation,
- rhs_continuation_linefeed,
- prereq_quote,
- prereq,
- prereq_continuation,
- prereq_continuation_linefeed,
-};
-
-pub const Token = union(enum) {
- target: []const u8,
- target_must_resolve: []const u8,
- prereq: []const u8,
-
- incomplete_quoted_prerequisite: IndexAndBytes,
- incomplete_target: IndexAndBytes,
-
- invalid_target: IndexAndChar,
- bad_target_escape: IndexAndChar,
- expected_dollar_sign: IndexAndChar,
- continuation_eol: IndexAndChar,
- incomplete_escape: IndexAndChar,
-
- pub const IndexAndChar = struct {
- index: usize,
- char: u8,
- };
-
- pub const IndexAndBytes = struct {
- index: usize,
- bytes: []const u8,
- };
-
- /// Resolve escapes in target. Only valid with .target_must_resolve.
- pub fn resolve(self: Token, writer: anytype) @TypeOf(writer).Error!void {
- const bytes = self.target_must_resolve; // resolve called on incorrect token
-
- var state: enum { start, escape, dollar } = .start;
- for (bytes) |c| {
- switch (state) {
- .start => {
- switch (c) {
- '\\' => state = .escape,
- '$' => state = .dollar,
- else => try writer.writeByte(c),
- }
- },
- .escape => {
- switch (c) {
- ' ', '#', '\\' => {},
- '$' => {
- try writer.writeByte('\\');
- state = .dollar;
- continue;
- },
- else => try writer.writeByte('\\'),
- }
- try writer.writeByte(c);
- state = .start;
- },
- .dollar => {
- try writer.writeByte('$');
- switch (c) {
- '$' => {},
- else => try writer.writeByte(c),
- }
- state = .start;
- },
- }
- }
- }
-
- pub fn printError(self: Token, writer: anytype) @TypeOf(writer).Error!void {
- switch (self) {
- .target, .target_must_resolve, .prereq => unreachable, // not an error
- .incomplete_quoted_prerequisite,
- .incomplete_target,
- => |index_and_bytes| {
- try writer.print("{s} '", .{self.errStr()});
- if (self == .incomplete_target) {
- const tmp = Token{ .target_must_resolve = index_and_bytes.bytes };
- try tmp.resolve(writer);
- } else {
- try printCharValues(writer, index_and_bytes.bytes);
- }
- try writer.print("' at position {d}", .{index_and_bytes.index});
- },
- .invalid_target,
- .bad_target_escape,
- .expected_dollar_sign,
- .continuation_eol,
- .incomplete_escape,
- => |index_and_char| {
- try writer.writeAll("illegal char ");
- try printUnderstandableChar(writer, index_and_char.char);
- try writer.print(" at position {d}: {s}", .{ index_and_char.index, self.errStr() });
- },
- }
- }
-
- fn errStr(self: Token) []const u8 {
- return switch (self) {
- .target, .target_must_resolve, .prereq => unreachable, // not an error
- .incomplete_quoted_prerequisite => "incomplete quoted prerequisite",
- .incomplete_target => "incomplete target",
- .invalid_target => "invalid target",
- .bad_target_escape => "bad target escape",
- .expected_dollar_sign => "expecting '$'",
- .continuation_eol => "continuation expecting end-of-line",
- .incomplete_escape => "incomplete escape",
- };
- }
-};
-
-test "empty file" {
- try depTokenizer("", "");
-}
-
-test "empty whitespace" {
- try depTokenizer("\n", "");
- try depTokenizer("\r", "");
- try depTokenizer("\r\n", "");
- try depTokenizer(" ", "");
-}
-
-test "empty colon" {
- try depTokenizer(":", "");
- try depTokenizer("\n:", "");
- try depTokenizer("\r:", "");
- try depTokenizer("\r\n:", "");
- try depTokenizer(" :", "");
-}
-
-test "empty target" {
- try depTokenizer("foo.o:", "target = {foo.o}");
- try depTokenizer(
- \\foo.o:
- \\bar.o:
- \\abcd.o:
- ,
- \\target = {foo.o}
- \\target = {bar.o}
- \\target = {abcd.o}
- );
-}
-
-test "whitespace empty target" {
- try depTokenizer("\nfoo.o:", "target = {foo.o}");
- try depTokenizer("\rfoo.o:", "target = {foo.o}");
- try depTokenizer("\r\nfoo.o:", "target = {foo.o}");
- try depTokenizer(" foo.o:", "target = {foo.o}");
-}
-
-test "escape empty target" {
- try depTokenizer("\\ foo.o:", "target = { foo.o}");
- try depTokenizer("\\#foo.o:", "target = {#foo.o}");
- try depTokenizer("\\\\foo.o:", "target = {\\foo.o}");
- try depTokenizer("$$foo.o:", "target = {$foo.o}");
-}
-
-test "empty target linefeeds" {
- try depTokenizer("\n", "");
- try depTokenizer("\r\n", "");
-
- const expect = "target = {foo.o}";
- try depTokenizer(
- \\foo.o:
- , expect);
- try depTokenizer(
- \\foo.o:
- \\
- , expect);
- try depTokenizer(
- \\foo.o:
- , expect);
- try depTokenizer(
- \\foo.o:
- \\
- , expect);
-}
-
-test "empty target linefeeds + continuations" {
- const expect = "target = {foo.o}";
- try depTokenizer(
- \\foo.o:\
- , expect);
- try depTokenizer(
- \\foo.o:\
- \\
- , expect);
- try depTokenizer(
- \\foo.o:\
- , expect);
- try depTokenizer(
- \\foo.o:\
- \\
- , expect);
-}
-
-test "empty target linefeeds + hspace + continuations" {
- const expect = "target = {foo.o}";
- try depTokenizer(
- \\foo.o: \
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\
- , expect);
- try depTokenizer(
- \\foo.o: \
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\
- , expect);
-}
-
-test "prereq" {
- const expect =
- \\target = {foo.o}
- \\prereq = {foo.c}
- ;
- try depTokenizer("foo.o: foo.c", expect);
- try depTokenizer(
- \\foo.o: \
- \\foo.c
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\ foo.c
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\ foo.c
- , expect);
-}
-
-test "prereq continuation" {
- const expect =
- \\target = {foo.o}
- \\prereq = {foo.h}
- \\prereq = {bar.h}
- ;
- try depTokenizer(
- \\foo.o: foo.h\
- \\bar.h
- , expect);
- try depTokenizer(
- \\foo.o: foo.h\
- \\bar.h
- , expect);
-}
-
-test "multiple prereqs" {
- const expect =
- \\target = {foo.o}
- \\prereq = {foo.c}
- \\prereq = {foo.h}
- \\prereq = {bar.h}
- ;
- try depTokenizer("foo.o: foo.c foo.h bar.h", expect);
- try depTokenizer(
- \\foo.o: \
- \\foo.c foo.h bar.h
- , expect);
- try depTokenizer(
- \\foo.o: foo.c foo.h bar.h\
- , expect);
- try depTokenizer(
- \\foo.o: foo.c foo.h bar.h\
- \\
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\foo.c \
- \\ foo.h\
- \\bar.h
- \\
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\foo.c \
- \\ foo.h\
- \\bar.h\
- \\
- , expect);
- try depTokenizer(
- \\foo.o: \
- \\foo.c \
- \\ foo.h\
- \\bar.h\
- , expect);
-}
-
-test "multiple targets and prereqs" {
- try depTokenizer(
- \\foo.o: foo.c
- \\bar.o: bar.c a.h b.h c.h
- \\abc.o: abc.c \
- \\ one.h two.h \
- \\ three.h four.h
- ,
- \\target = {foo.o}
- \\prereq = {foo.c}
- \\target = {bar.o}
- \\prereq = {bar.c}
- \\prereq = {a.h}
- \\prereq = {b.h}
- \\prereq = {c.h}
- \\target = {abc.o}
- \\prereq = {abc.c}
- \\prereq = {one.h}
- \\prereq = {two.h}
- \\prereq = {three.h}
- \\prereq = {four.h}
- );
- try depTokenizer(
- \\ascii.o: ascii.c
- \\base64.o: base64.c stdio.h
- \\elf.o: elf.c a.h b.h c.h
- \\macho.o: \
- \\ macho.c\
- \\ a.h b.h c.h
- ,
- \\target = {ascii.o}
- \\prereq = {ascii.c}
- \\target = {base64.o}
- \\prereq = {base64.c}
- \\prereq = {stdio.h}
- \\target = {elf.o}
- \\prereq = {elf.c}
- \\prereq = {a.h}
- \\prereq = {b.h}
- \\prereq = {c.h}
- \\target = {macho.o}
- \\prereq = {macho.c}
- \\prereq = {a.h}
- \\prereq = {b.h}
- \\prereq = {c.h}
- );
- try depTokenizer(
- \\a$$scii.o: ascii.c
- \\\\base64.o: "\base64.c" "s t#dio.h"
- \\e\\lf.o: "e\lf.c" "a.h$$" "$$b.h c.h$$"
- \\macho.o: \
- \\ "macho!.c" \
- \\ a.h b.h c.h
- ,
- \\target = {a$scii.o}
- \\prereq = {ascii.c}
- \\target = {\base64.o}
- \\prereq = {\base64.c}
- \\prereq = {s t#dio.h}
- \\target = {e\lf.o}
- \\prereq = {e\lf.c}
- \\prereq = {a.h$$}
- \\prereq = {$$b.h c.h$$}
- \\target = {macho.o}
- \\prereq = {macho!.c}
- \\prereq = {a.h}
- \\prereq = {b.h}
- \\prereq = {c.h}
- );
-}
-
-test "windows quoted prereqs" {
- try depTokenizer(
- \\c:\foo.o: "C:\Program Files (x86)\Microsoft Visual Studio\foo.c"
- \\c:\foo2.o: "C:\Program Files (x86)\Microsoft Visual Studio\foo2.c" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\foo1.h" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\foo2.h"
- ,
- \\target = {c:\foo.o}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\foo.c}
- \\target = {c:\foo2.o}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\foo2.c}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\foo1.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\foo2.h}
- );
-}
-
-test "windows mixed prereqs" {
- try depTokenizer(
- \\cimport.o: \
- \\ C:\msys64\home\anon\project\zig\master\zig-cache\o\qhvhbUo7GU5iKyQ5mpA8TcQpncCYaQu0wwvr3ybiSTj_Dtqi1Nmcb70kfODJ2Qlg\cimport.h \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\stdio.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt.h" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vcruntime.h" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\sal.h" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\concurrencysal.h" \
- \\ C:\msys64\opt\zig\lib\zig\include\vadefs.h \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vadefs.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_wstdio.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_stdio_config.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\string.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_memory.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_memcpy_s.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\errno.h" \
- \\ "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vcruntime_string.h" \
- \\ "C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_wstring.h"
- ,
- \\target = {cimport.o}
- \\prereq = {C:\msys64\home\anon\project\zig\master\zig-cache\o\qhvhbUo7GU5iKyQ5mpA8TcQpncCYaQu0wwvr3ybiSTj_Dtqi1Nmcb70kfODJ2Qlg\cimport.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\stdio.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vcruntime.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\sal.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\concurrencysal.h}
- \\prereq = {C:\msys64\opt\zig\lib\zig\include\vadefs.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vadefs.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_wstdio.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_stdio_config.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\string.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_memory.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_memcpy_s.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\errno.h}
- \\prereq = {C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.21.27702\lib\x64\\..\..\include\vcruntime_string.h}
- \\prereq = {C:\Program Files (x86)\Windows Kits\10\\Include\10.0.17763.0\ucrt\corecrt_wstring.h}
- );
-}
-
-test "windows funky targets" {
- try depTokenizer(
- \\C:\Users\anon\foo.o:
- \\C:\Users\anon\foo\ .o:
- \\C:\Users\anon\foo\#.o:
- \\C:\Users\anon\foo$$.o:
- \\C:\Users\anon\\\ foo.o:
- \\C:\Users\anon\\#foo.o:
- \\C:\Users\anon\$$foo.o:
- \\C:\Users\anon\\\ \ \ \ \ foo.o:
- ,
- \\target = {C:\Users\anon\foo.o}
- \\target = {C:\Users\anon\foo .o}
- \\target = {C:\Users\anon\foo#.o}
- \\target = {C:\Users\anon\foo$.o}
- \\target = {C:\Users\anon\ foo.o}
- \\target = {C:\Users\anon\#foo.o}
- \\target = {C:\Users\anon\$foo.o}
- \\target = {C:\Users\anon\ foo.o}
- );
-}
-
-test "windows drive and forward slashes" {
- try depTokenizer(
- \\C:/msys64/what/zig-cache\tmp\48ac4d78dd531abd-cxa_thread_atexit.obj: \
- \\ C:/msys64/opt/zig3/lib/zig/libc/mingw/crt/cxa_thread_atexit.c
- ,
- \\target = {C:/msys64/what/zig-cache\tmp\48ac4d78dd531abd-cxa_thread_atexit.obj}
- \\prereq = {C:/msys64/opt/zig3/lib/zig/libc/mingw/crt/cxa_thread_atexit.c}
- );
-}
-
-test "error incomplete escape - reverse_solidus" {
- try depTokenizer("\\",
- \\ERROR: illegal char '\' at position 0: incomplete escape
- );
- try depTokenizer("\t\\",
- \\ERROR: illegal char '\' at position 1: incomplete escape
- );
- try depTokenizer("\n\\",
- \\ERROR: illegal char '\' at position 1: incomplete escape
- );
- try depTokenizer("\r\\",
- \\ERROR: illegal char '\' at position 1: incomplete escape
- );
- try depTokenizer("\r\n\\",
- \\ERROR: illegal char '\' at position 2: incomplete escape
- );
- try depTokenizer(" \\",
- \\ERROR: illegal char '\' at position 1: incomplete escape
- );
-}
-
-test "error incomplete escape - dollar_sign" {
- try depTokenizer("$",
- \\ERROR: illegal char '$' at position 0: incomplete escape
- );
- try depTokenizer("\t$",
- \\ERROR: illegal char '$' at position 1: incomplete escape
- );
- try depTokenizer("\n$",
- \\ERROR: illegal char '$' at position 1: incomplete escape
- );
- try depTokenizer("\r$",
- \\ERROR: illegal char '$' at position 1: incomplete escape
- );
- try depTokenizer("\r\n$",
- \\ERROR: illegal char '$' at position 2: incomplete escape
- );
- try depTokenizer(" $",
- \\ERROR: illegal char '$' at position 1: incomplete escape
- );
-}
-
-test "error incomplete target" {
- try depTokenizer("foo.o",
- \\ERROR: incomplete target 'foo.o' at position 0
- );
- try depTokenizer("\tfoo.o",
- \\ERROR: incomplete target 'foo.o' at position 1
- );
- try depTokenizer("\nfoo.o",
- \\ERROR: incomplete target 'foo.o' at position 1
- );
- try depTokenizer("\rfoo.o",
- \\ERROR: incomplete target 'foo.o' at position 1
- );
- try depTokenizer("\r\nfoo.o",
- \\ERROR: incomplete target 'foo.o' at position 2
- );
- try depTokenizer(" foo.o",
- \\ERROR: incomplete target 'foo.o' at position 1
- );
-
- try depTokenizer("\\ foo.o",
- \\ERROR: incomplete target ' foo.o' at position 0
- );
- try depTokenizer("\\#foo.o",
- \\ERROR: incomplete target '#foo.o' at position 0
- );
- try depTokenizer("\\\\foo.o",
- \\ERROR: incomplete target '\foo.o' at position 0
- );
- try depTokenizer("$$foo.o",
- \\ERROR: incomplete target '$foo.o' at position 0
- );
-}
-
-test "error illegal char at position - bad target escape" {
- try depTokenizer("\\\t",
- \\ERROR: illegal char \x09 at position 1: bad target escape
- );
- try depTokenizer("\\\n",
- \\ERROR: illegal char \x0A at position 1: bad target escape
- );
- try depTokenizer("\\\r",
- \\ERROR: illegal char \x0D at position 1: bad target escape
- );
- try depTokenizer("\\\r\n",
- \\ERROR: illegal char \x0D at position 1: bad target escape
- );
-}
-
-test "error illegal char at position - execting dollar_sign" {
- try depTokenizer("$\t",
- \\ERROR: illegal char \x09 at position 1: expecting '$'
- );
- try depTokenizer("$\n",
- \\ERROR: illegal char \x0A at position 1: expecting '$'
- );
- try depTokenizer("$\r",
- \\ERROR: illegal char \x0D at position 1: expecting '$'
- );
- try depTokenizer("$\r\n",
- \\ERROR: illegal char \x0D at position 1: expecting '$'
- );
-}
-
-test "error illegal char at position - invalid target" {
- try depTokenizer("foo\t.o",
- \\ERROR: illegal char \x09 at position 3: invalid target
- );
- try depTokenizer("foo\n.o",
- \\ERROR: illegal char \x0A at position 3: invalid target
- );
- try depTokenizer("foo\r.o",
- \\ERROR: illegal char \x0D at position 3: invalid target
- );
- try depTokenizer("foo\r\n.o",
- \\ERROR: illegal char \x0D at position 3: invalid target
- );
-}
-
-test "error target - continuation expecting end-of-line" {
- try depTokenizer("foo.o: \\\t",
- \\target = {foo.o}
- \\ERROR: illegal char \x09 at position 8: continuation expecting end-of-line
- );
- try depTokenizer("foo.o: \\ ",
- \\target = {foo.o}
- \\ERROR: illegal char ' ' at position 8: continuation expecting end-of-line
- );
- try depTokenizer("foo.o: \\x",
- \\target = {foo.o}
- \\ERROR: illegal char 'x' at position 8: continuation expecting end-of-line
- );
- try depTokenizer("foo.o: \\\x0dx",
- \\target = {foo.o}
- \\ERROR: illegal char 'x' at position 9: continuation expecting end-of-line
- );
-}
-
-test "error prereq - continuation expecting end-of-line" {
- try depTokenizer("foo.o: foo.h\\\x0dx",
- \\target = {foo.o}
- \\ERROR: illegal char 'x' at position 14: continuation expecting end-of-line
- );
-}
-
-// - tokenize input, emit textual representation, and compare to expect
-fn depTokenizer(input: []const u8, expect: []const u8) !void {
- var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
- const arena = arena_allocator.allocator();
- defer arena_allocator.deinit();
-
- var it: Tokenizer = .{ .bytes = input };
- var buffer = std.ArrayList(u8).init(arena);
- var resolve_buf = std.ArrayList(u8).init(arena);
- var i: usize = 0;
- while (it.next()) |token| {
- if (i != 0) try buffer.appendSlice("\n");
- switch (token) {
- .target, .prereq => |bytes| {
- try buffer.appendSlice(@tagName(token));
- try buffer.appendSlice(" = {");
- for (bytes) |b| {
- try buffer.append(printable_char_tab[b]);
- }
- try buffer.appendSlice("}");
- },
- .target_must_resolve => {
- try buffer.appendSlice("target = {");
- try token.resolve(resolve_buf.writer());
- for (resolve_buf.items) |b| {
- try buffer.append(printable_char_tab[b]);
- }
- resolve_buf.items.len = 0;
- try buffer.appendSlice("}");
- },
- else => {
- try buffer.appendSlice("ERROR: ");
- try token.printError(buffer.writer());
- break;
- },
- }
- i += 1;
- }
-
- if (std.mem.eql(u8, expect, buffer.items)) {
- try testing.expect(true);
- return;
- }
-
- const out = std.io.getStdErr().writer();
-
- try out.writeAll("\n");
- try printSection(out, "<<<< input", input);
- try printSection(out, "==== expect", expect);
- try printSection(out, ">>>> got", buffer.items);
- try printRuler(out);
-
- try testing.expect(false);
-}
-
-fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void {
- try printLabel(out, label, bytes);
- try hexDump(out, bytes);
- try printRuler(out);
- try out.writeAll(bytes);
- try out.writeAll("\n");
-}
-
-fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void {
- var buf: [80]u8 = undefined;
- var text = try std.fmt.bufPrint(buf[0..], "{s} {d} bytes ", .{ label, bytes.len });
- try out.writeAll(text);
- var i: usize = text.len;
- const end = 79;
- while (i < end) : (i += 1) {
- try out.writeAll(&[_]u8{label[0]});
- }
- try out.writeAll("\n");
-}
-
-fn printRuler(out: anytype) !void {
- var i: usize = 0;
- const end = 79;
- while (i < end) : (i += 1) {
- try out.writeAll("-");
- }
- try out.writeAll("\n");
-}
-
-fn hexDump(out: anytype, bytes: []const u8) !void {
- const n16 = bytes.len >> 4;
- var line: usize = 0;
- var offset: usize = 0;
- while (line < n16) : (line += 1) {
- try hexDump16(out, offset, bytes[offset .. offset + 16]);
- offset += 16;
- }
-
- const n = bytes.len & 0x0f;
- if (n > 0) {
- try printDecValue(out, offset, 8);
- try out.writeAll(":");
- try out.writeAll(" ");
- var end1 = std.math.min(offset + n, offset + 8);
- for (bytes[offset..end1]) |b| {
- try out.writeAll(" ");
- try printHexValue(out, b, 2);
- }
- var end2 = offset + n;
- if (end2 > end1) {
- try out.writeAll(" ");
- for (bytes[end1..end2]) |b| {
- try out.writeAll(" ");
- try printHexValue(out, b, 2);
- }
- }
- const short = 16 - n;
- var i: usize = 0;
- while (i < short) : (i += 1) {
- try out.writeAll(" ");
- }
- if (end2 > end1) {
- try out.writeAll(" |");
- } else {
- try out.writeAll(" |");
- }
- try printCharValues(out, bytes[offset..end2]);
- try out.writeAll("|\n");
- offset += n;
- }
-
- try printDecValue(out, offset, 8);
- try out.writeAll(":");
- try out.writeAll("\n");
-}
-
-fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void {
- try printDecValue(out, offset, 8);
- try out.writeAll(":");
- try out.writeAll(" ");
- for (bytes[0..8]) |b| {
- try out.writeAll(" ");
- try printHexValue(out, b, 2);
- }
- try out.writeAll(" ");
- for (bytes[8..16]) |b| {
- try out.writeAll(" ");
- try printHexValue(out, b, 2);
- }
- try out.writeAll(" |");
- try printCharValues(out, bytes);
- try out.writeAll("|\n");
-}
-
-fn printDecValue(out: anytype, value: u64, width: u8) !void {
- var buffer: [20]u8 = undefined;
- const len = std.fmt.formatIntBuf(buffer[0..], value, 10, .lower, .{ .width = width, .fill = '0' });
- try out.writeAll(buffer[0..len]);
-}
-
-fn printHexValue(out: anytype, value: u64, width: u8) !void {
- var buffer: [16]u8 = undefined;
- const len = std.fmt.formatIntBuf(buffer[0..], value, 16, .lower, .{ .width = width, .fill = '0' });
- try out.writeAll(buffer[0..len]);
-}
-
-fn printCharValues(out: anytype, bytes: []const u8) !void {
- for (bytes) |b| {
- try out.writeAll(&[_]u8{printable_char_tab[b]});
- }
-}
-
-fn printUnderstandableChar(out: anytype, char: u8) !void {
- if (std.ascii.isPrint(char)) {
- try out.print("'{c}'", .{char});
- } else {
- try out.print("\\x{X:0>2}", .{char});
- }
-}
-
-// zig fmt: off
-const printable_char_tab: [256]u8 = (
- "................................ !\"#$%&'()*+,-./0123456789:;<=>?" ++
- "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~." ++
- "................................................................" ++
- "................................................................"
-).*;
diff --git a/src/Module.zig b/src/Module.zig
index e4cf0189cc..a129cb0cb6 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -16,7 +16,7 @@ const Ast = std.zig.Ast;
const Module = @This();
const Compilation = @import("Compilation.zig");
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
diff --git a/src/Package.zig b/src/Package.zig
index 401eef2121..a3afe21009 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -13,7 +13,7 @@ const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig");
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const build_options = @import("build_options");
const Manifest = @import("Manifest.zig");
diff --git a/src/glibc.zig b/src/glibc.zig
index 8dce1c5132..2a2887c334 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -11,7 +11,7 @@ const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const Package = @import("Package.zig");
pub const Lib = struct {
diff --git a/src/link.zig b/src/link.zig
index 2b3ce51667..5650e0679a 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -10,7 +10,7 @@ const wasi_libc = @import("wasi_libc.zig");
const Air = @import("Air.zig");
const Allocator = std.mem.Allocator;
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const Compilation = @import("Compilation.zig");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const Liveness = @import("Liveness.zig");
diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig
index d705f62f5c..c308ff5989 100644
--- a/src/link/Coff/lld.zig
+++ b/src/link/Coff/lld.zig
@@ -5,6 +5,7 @@ const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
+const Cache = std.Build.Cache;
const mingw = @import("../../mingw.zig");
const link = @import("../../link.zig");
@@ -13,7 +14,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
-const Cache = @import("../../Cache.zig");
const Coff = @import("../Coff.zig");
const Compilation = @import("../../Compilation.zig");
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 45952da6c0..37ebfdc0dc 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -21,7 +21,7 @@ const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
pub const Atom = @import("Elf/Atom.zig");
-const Cache = @import("../Cache.zig");
+const Cache = std.Build.Cache;
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
const File = link.File;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 24ef275c5b..35f5f1b562 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -28,7 +28,7 @@ const Air = @import("../Air.zig");
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
pub const Atom = @import("MachO/Atom.zig");
-const Cache = @import("../Cache.zig");
+const Cache = std.Build.Cache;
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = File.Dwarf;
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 095ac9b5ce..785fa71445 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -20,7 +20,7 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Archive = @import("Archive.zig");
const Atom = @import("ZldAtom.zig");
-const Cache = @import("../../Cache.zig");
+const Cache = std.Build.Cache;
const CodeSignature = @import("CodeSignature.zig");
const Compilation = @import("../../Compilation.zig");
const DwarfInfo = @import("DwarfInfo.zig");
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 9d20412788..e62a2050d7 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -20,7 +20,7 @@ const lldMain = @import("../main.zig").lldMain;
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const wasi_libc = @import("../wasi_libc.zig");
-const Cache = @import("../Cache.zig");
+const Cache = std.Build.Cache;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
diff --git a/src/main.zig b/src/main.zig
index 00a7b126c8..2add2f9165 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -20,7 +20,7 @@ const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const wasi_libc = @import("wasi_libc.zig");
const translate_c = @import("translate_c.zig");
const clang = @import("clang.zig");
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
const target_util = @import("target.zig");
const ThreadPool = @import("ThreadPool.zig");
const crash_report = @import("crash_report.zig");
@@ -3607,7 +3607,7 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
defer if (enable_cache) man.deinit();
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
- man.hashCSource(c_source_file) catch |err| {
+ Compilation.cache_helpers.hashCSource(&man, c_source_file) catch |err| {
fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) });
};
diff --git a/src/mingw.zig b/src/mingw.zig
index 06880743c6..4f94e26a98 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -8,7 +8,7 @@ const log = std.log.scoped(.mingw);
const builtin = @import("builtin");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
-const Cache = @import("Cache.zig");
+const Cache = std.Build.Cache;
pub const CRTFile = enum {
crt2_o,