author    Andrew Kelley <andrew@ziglang.org>    2023-10-09 11:47:37 -0700
committer GitHub <noreply@github.com>           2023-10-09 11:47:37 -0700
commit    f7bc55c0136b91805bd046a8cc8ea745d7e7567d (patch)
tree      8379c645854d3c513ebb18e7fbabc7ab5ed1a283 /src
parent    75b48ef503204d3ba005647ecce8fda4657a8588 (diff)
parent    95907cb79578779108f3772cb93648d38354b9ec (diff)
Merge pull request #17392 from ziglang/fetch
rework package manager
Diffstat (limited to 'src')
-rw-r--r--  src/Autodoc.zig                                 35
-rw-r--r--  src/Compilation.zig                            289
-rw-r--r--  src/Module.zig                                 207
-rw-r--r--  src/Package.zig                               1444
-rw-r--r--  src/Package/Fetch.zig                         1557
-rw-r--r--  src/Package/Fetch/git.zig (renamed from src/git.zig)                                          2
-rw-r--r--  src/Package/Fetch/git/testdata/testrepo.idx (renamed from src/git/testdata/testrepo.idx)      bin 3480 -> 3480 bytes
-rw-r--r--  src/Package/Fetch/git/testdata/testrepo.pack (renamed from src/git/testdata/testrepo.pack)    bin 6511 -> 6511 bytes
-rw-r--r--  src/Package/Manifest.zig (renamed from src/Manifest.zig)                                      73
-rw-r--r--  src/Package/Module.zig                          34
-rw-r--r--  src/Package/hash.zig                           153
-rw-r--r--  src/Sema.zig                                    54
-rw-r--r--  src/codegen/llvm.zig                            42
-rw-r--r--  src/crash_report.zig                            22
-rw-r--r--  src/glibc.zig                                    2
-rw-r--r--  src/libcxx.zig                                   4
-rw-r--r--  src/libtsan.zig                                  2
-rw-r--r--  src/libunwind.zig                                2
-rw-r--r--  src/link/Dwarf.zig                              18
-rw-r--r--  src/link/Elf.zig                                 6
-rw-r--r--  src/link/Plan9.zig                               9
-rw-r--r--  src/main.zig                                   534
-rw-r--r--  src/musl.zig                                     2
23 files changed, 2437 insertions, 2054 deletions
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index a70040558c..5a241e51ab 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -6,7 +6,7 @@ const Autodoc = @This();
const Compilation = @import("Compilation.zig");
const CompilationModule = @import("Module.zig");
const File = CompilationModule.File;
-const Module = @import("Package.zig");
+const Module = @import("Package.zig").Module;
const Tokenizer = std.zig.Tokenizer;
const InternPool = @import("InternPool.zig");
const Zir = @import("Zir.zig");
@@ -98,9 +98,8 @@ pub fn generate(cm: *CompilationModule, output_dir: std.fs.Dir) !void {
}
fn generateZirData(self: *Autodoc, output_dir: std.fs.Dir) !void {
- const root_src_dir = self.comp_module.main_pkg.root_src_directory;
- const root_src_path = self.comp_module.main_pkg.root_src_path;
- const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path});
+ const root_src_path = self.comp_module.main_mod.root_src_path;
+ const joined_src_path = try self.comp_module.main_mod.root.joinString(self.arena, root_src_path);
defer self.arena.free(joined_src_path);
const abs_root_src_path = try std.fs.path.resolve(self.arena, &.{ ".", joined_src_path });
@@ -295,20 +294,20 @@ fn generateZirData(self: *Autodoc, output_dir: std.fs.Dir) !void {
}
const rootName = blk: {
- const rootName = std.fs.path.basename(self.comp_module.main_pkg.root_src_path);
+ const rootName = std.fs.path.basename(self.comp_module.main_mod.root_src_path);
break :blk rootName[0 .. rootName.len - 4];
};
const main_type_index = self.types.items.len;
{
- try self.modules.put(self.arena, self.comp_module.main_pkg, .{
+ try self.modules.put(self.arena, self.comp_module.main_mod, .{
.name = rootName,
.main = main_type_index,
.table = .{},
});
try self.modules.entries.items(.value)[0].table.put(
self.arena,
- self.comp_module.main_pkg,
+ self.comp_module.main_mod,
.{
.name = rootName,
.value = 0,
@@ -412,7 +411,7 @@ fn generateZirData(self: *Autodoc, output_dir: std.fs.Dir) !void {
while (files_iterator.next()) |entry| {
const sub_file_path = entry.key_ptr.*.sub_file_path;
- const file_module = entry.key_ptr.*.pkg;
+ const file_module = entry.key_ptr.*.mod;
const module_name = (self.modules.get(file_module) orelse continue).name;
const file_path = std.fs.path.dirname(sub_file_path) orelse "";
@@ -986,12 +985,12 @@ fn walkInstruction(
// importFile cannot error out since all files
// are already loaded at this point
- if (file.pkg.table.get(path)) |other_module| {
+ if (file.mod.deps.get(path)) |other_module| {
const result = try self.modules.getOrPut(self.arena, other_module);
// Immediately add this module to the import table of our
// current module, regardless of whether it's new or not.
- if (self.modules.getPtr(file.pkg)) |current_module| {
+ if (self.modules.getPtr(file.mod)) |current_module| {
// TODO: apparently, in the stdlib a file gets analyzed before
// its module gets added. I guess we're importing a file
// that belongs to another module through its file path?
@@ -1025,12 +1024,12 @@ fn walkInstruction(
// TODO: Add this module as a dependency to the current module
// TODO: this seems something that could be done in bulk
// at the beginning or the end, or something.
- const root_src_dir = other_module.root_src_directory;
- const root_src_path = other_module.root_src_path;
- const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path});
- defer self.arena.free(joined_src_path);
-
- const abs_root_src_path = try std.fs.path.resolve(self.arena, &.{ ".", joined_src_path });
+ const abs_root_src_path = try std.fs.path.resolve(self.arena, &.{
+ ".",
+ other_module.root.root_dir.path orelse ".",
+ other_module.root.sub_path,
+ other_module.root_src_path,
+ });
defer self.arena.free(abs_root_src_path);
const new_file = self.comp_module.import_table.get(abs_root_src_path).?;
@@ -5683,7 +5682,7 @@ fn writeFileTableToJson(
while (it.next()) |entry| {
try jsw.beginArray();
try jsw.write(entry.key_ptr.*.sub_file_path);
- try jsw.write(mods.getIndex(entry.key_ptr.*.pkg) orelse 0);
+ try jsw.write(mods.getIndex(entry.key_ptr.*.mod) orelse 0);
try jsw.endArray();
}
try jsw.endArray();
@@ -5840,7 +5839,7 @@ fn addGuide(self: *Autodoc, file: *File, guide_path: []const u8, section: *Secti
file.sub_file_path, "..", guide_path,
});
- var guide_file = try file.pkg.root_src_directory.handle.openFile(resolved_path, .{});
+ var guide_file = try file.mod.root.openFile(resolved_path, .{});
defer guide_file.close();
const guide = guide_file.reader().readAllAlloc(self.arena, 1 * 1024 * 1024) catch |err| switch (err) {
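
The Autodoc hunks above replace the old two-step join through root_src_directory with a single std.fs.path.resolve over the module root's three components. A minimal sketch of that resolution pattern; the helper name and parameters here are illustrative, not part of the commit:

const std = @import("std");

/// Illustrative helper: resolve a module's root source file from the three
/// components the new code passes to std.fs.path.resolve. A null root_dir
/// path means the current working directory, hence the "." fallback.
fn resolveRootSrc(
    arena: std.mem.Allocator,
    root_dir_path: ?[]const u8,
    sub_path: []const u8,
    root_src_path: []const u8,
) ![]u8 {
    return std.fs.path.resolve(arena, &.{
        root_dir_path orelse ".",
        sub_path,
        root_src_path,
    });
}
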
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 28b67ff734..cd4c6ea11b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -41,8 +41,9 @@ const resinator = @import("resinator.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
-/// Arena-allocated memory used during initialization. Should be untouched until deinit.
-arena_state: std.heap.ArenaAllocator.State,
+/// Arena-allocated memory, mostly used during initialization. However, it can be used
+/// for other things requiring the same lifetime as the `Compilation`.
+arena: std.heap.ArenaAllocator,
bin_file: *link.File,
c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{},
win32_resource_table: if (build_options.only_core_functionality) void else std.AutoArrayHashMapUnmanaged(*Win32Resource, void) =
@@ -124,7 +125,7 @@ cache_parent: *Cache,
/// Path to own executable for invoking `zig clang`.
self_exe_path: ?[]const u8,
/// null means -fno-emit-bin.
-/// This is mutable memory allocated into the Compilation-lifetime arena (`arena_state`)
+/// This is mutable memory allocated into the Compilation-lifetime arena (`arena`)
/// of exactly the correct size for "o/[digest]/[basename]".
/// The basename is of the outputted binary file in case we don't know the directory yet.
whole_bin_sub_path: ?[]u8,
@@ -273,8 +274,8 @@ const Job = union(enum) {
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: Module.Decl.Index,
- /// The main source file for the package needs to be analyzed.
- analyze_pkg: *Package,
+ /// The main source file for the module needs to be analyzed.
+ analyze_mod: *Package.Module,
/// one of the glibc static objects
glibc_crt_file: glibc.CRTFile,
@@ -414,7 +415,7 @@ pub const MiscTask = enum {
compiler_rt,
libssp,
zig_libc,
- analyze_pkg,
+ analyze_mod,
@"musl crti.o",
@"musl crtn.o",
@@ -544,7 +545,7 @@ pub const InitOptions = struct {
global_cache_directory: Directory,
target: Target,
root_name: []const u8,
- main_pkg: ?*Package,
+ main_mod: ?*Package.Module,
output_mode: std.builtin.OutputMode,
thread_pool: *ThreadPool,
dynamic_linker: ?[]const u8 = null,
@@ -736,53 +737,55 @@ pub const InitOptions = struct {
pdb_out_path: ?[]const u8 = null,
};
-fn addPackageTableToCacheHash(
+fn addModuleTableToCacheHash(
hash: *Cache.HashHelper,
arena: *std.heap.ArenaAllocator,
- pkg_table: Package.Table,
- seen_table: *std.AutoHashMap(*Package, void),
+ mod_table: Package.Module.Deps,
+ seen_table: *std.AutoHashMap(*Package.Module, void),
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
const allocator = arena.allocator();
- const packages = try allocator.alloc(Package.Table.KV, pkg_table.count());
+ const modules = try allocator.alloc(Package.Module.Deps.KV, mod_table.count());
{
// Copy over the hashmap entries to our slice
- var table_it = pkg_table.iterator();
+ var table_it = mod_table.iterator();
var idx: usize = 0;
while (table_it.next()) |entry| : (idx += 1) {
- packages[idx] = .{
+ modules[idx] = .{
.key = entry.key_ptr.*,
.value = entry.value_ptr.*,
};
}
}
// Sort the slice by package name
- mem.sort(Package.Table.KV, packages, {}, struct {
- fn lessThan(_: void, lhs: Package.Table.KV, rhs: Package.Table.KV) bool {
+ mem.sortUnstable(Package.Module.Deps.KV, modules, {}, struct {
+ fn lessThan(_: void, lhs: Package.Module.Deps.KV, rhs: Package.Module.Deps.KV) bool {
return std.mem.lessThan(u8, lhs.key, rhs.key);
}
}.lessThan);
- for (packages) |pkg| {
- if ((try seen_table.getOrPut(pkg.value)).found_existing) continue;
+ for (modules) |mod| {
+ if ((try seen_table.getOrPut(mod.value)).found_existing) continue;
// Finally insert the package name and path to the cache hash.
- hash.addBytes(pkg.key);
+ hash.addBytes(mod.key);
switch (hash_type) {
.path_bytes => {
- hash.addBytes(pkg.value.root_src_path);
- hash.addOptionalBytes(pkg.value.root_src_directory.path);
+ hash.addBytes(mod.value.root_src_path);
+ hash.addOptionalBytes(mod.value.root.root_dir.path);
+ hash.addBytes(mod.value.root.sub_path);
},
.files => |man| {
- const pkg_zig_file = try pkg.value.root_src_directory.join(allocator, &[_][]const u8{
- pkg.value.root_src_path,
- });
+ const pkg_zig_file = try mod.value.root.joinString(
+ allocator,
+ mod.value.root_src_path,
+ );
_ = try man.addFile(pkg_zig_file, null);
},
}
- // Recurse to handle the package's dependencies
- try addPackageTableToCacheHash(hash, arena, pkg.value.table, seen_table, hash_type);
+ // Recurse to handle the module's dependencies
+ try addModuleTableToCacheHash(hash, arena, mod.value.deps, seen_table, hash_type);
}
}
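
Hash-map iteration order is unspecified, so addModuleTableToCacheHash copies the dependency entries into a slice and sorts them by name before hashing. A self-contained sketch of that ordering step, with a simplified stand-in for Package.Module.Deps.KV:

const std = @import("std");

const KV = struct { key: []const u8, value: u32 };

fn lessThan(_: void, lhs: KV, rhs: KV) bool {
    return std.mem.lessThan(u8, lhs.key, rhs.key);
}

test "dependency entries hash in name order" {
    var entries = [_]KV{
        .{ .key = "std", .value = 1 },
        .{ .key = "builtin", .value = 2 },
    };
    // Sort by module name so the resulting cache hash is deterministic.
    std.mem.sortUnstable(KV, &entries, {}, lessThan);
    try std.testing.expectEqualStrings("builtin", entries[0].key);
    try std.testing.expectEqualStrings("std", entries[1].key);
}
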
@@ -839,7 +842,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :blk true;
// If we have no zig code to compile, no need for LLVM.
- if (options.main_pkg == null)
+ if (options.main_mod == null)
break :blk false;
// If LLVM does not support the target, then we can't use it.
@@ -869,7 +872,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// compiler state, the second clause here can be removed so that incremental
// cache mode is used for LLVM backend too. We need some fuzz testing before
// that can be enabled.
- const cache_mode = if ((use_llvm or options.main_pkg == null) and !options.disable_lld_caching)
+ const cache_mode = if ((use_llvm or options.main_mod == null) and !options.disable_lld_caching)
CacheMode.whole
else
options.cache_mode;
@@ -925,7 +928,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
if (use_llvm) {
// If stage1 generates an object file, self-hosted linker is not
// yet sophisticated enough to handle that.
- break :blk options.main_pkg != null;
+ break :blk options.main_mod != null;
}
break :blk false;
@@ -1210,7 +1213,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
if (options.target.os.tag == .wasi) cache.hash.add(wasi_exec_model);
// TODO audit this and make sure everything is in it
- const module: ?*Module = if (options.main_pkg) |main_pkg| blk: {
+ const module: ?*Module = if (options.main_mod) |main_mod| blk: {
// Options that are specific to zig source files, that cannot be
// modified between incremental updates.
var hash = cache.hash;
@@ -1223,11 +1226,12 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// do want to namespace different source file names because they are
// likely different compilations and therefore this would be likely to
// cause cache hits.
- hash.addBytes(main_pkg.root_src_path);
- hash.addOptionalBytes(main_pkg.root_src_directory.path);
+ hash.addBytes(main_mod.root_src_path);
+ hash.addOptionalBytes(main_mod.root.root_dir.path);
+ hash.addBytes(main_mod.root.sub_path);
{
- var seen_table = std.AutoHashMap(*Package, void).init(arena);
- try addPackageTableToCacheHash(&hash, &arena_allocator, main_pkg.table, &seen_table, .path_bytes);
+ var seen_table = std.AutoHashMap(*Package.Module, void).init(arena);
+ try addModuleTableToCacheHash(&hash, &arena_allocator, main_mod.deps, &seen_table, .path_bytes);
}
},
.whole => {
@@ -1283,81 +1287,83 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.path = try options.local_cache_directory.join(arena, &[_][]const u8{artifact_sub_dir}),
};
- const builtin_pkg = try Package.createWithDir(
- gpa,
- zig_cache_artifact_directory,
- null,
- "builtin.zig",
- );
- errdefer builtin_pkg.destroy(gpa);
+ const builtin_mod = try Package.Module.create(arena, .{
+ .root = .{ .root_dir = zig_cache_artifact_directory },
+ .root_src_path = "builtin.zig",
+ .fully_qualified_name = "builtin",
+ });
- // When you're testing std, the main module is std. In that case, we'll just set the std
- // module to the main one, since avoiding the errors caused by duplicating it is more
- // effort than it's worth.
- const main_pkg_is_std = m: {
+ // When you're testing std, the main module is std. In that case,
+ // we'll just set the std module to the main one, since avoiding
+ // the errors caused by duplicating it is more effort than it's
+ // worth.
+ const main_mod_is_std = m: {
const std_path = try std.fs.path.resolve(arena, &[_][]const u8{
options.zig_lib_directory.path orelse ".",
"std",
"std.zig",
});
- defer arena.free(std_path);
const main_path = try std.fs.path.resolve(arena, &[_][]const u8{
- main_pkg.root_src_directory.path orelse ".",
- main_pkg.root_src_path,
+ main_mod.root.root_dir.path orelse ".",
+ main_mod.root.sub_path,
+ main_mod.root_src_path,
});
- defer arena.free(main_path);
break :m mem.eql(u8, main_path, std_path);
};
- const std_pkg = if (main_pkg_is_std)
- main_pkg
+ const std_mod = if (main_mod_is_std)
+ main_mod
else
- try Package.createWithDir(
- gpa,
- options.zig_lib_directory,
- "std",
- "std.zig",
- );
-
- errdefer if (!main_pkg_is_std) std_pkg.destroy(gpa);
+ try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = options.zig_lib_directory,
+ .sub_path = "std",
+ },
+ .root_src_path = "std.zig",
+ .fully_qualified_name = "std",
+ });
- const root_pkg = if (options.is_test) root_pkg: {
- const test_pkg = if (options.test_runner_path) |test_runner| test_pkg: {
- const test_dir = std.fs.path.dirname(test_runner);
- const basename = std.fs.path.basename(test_runner);
- const pkg = try Package.create(gpa, test_dir, basename);
+ const root_mod = if (options.is_test) root_mod: {
+ const test_mod = if (options.test_runner_path) |test_runner| test_mod: {
+ const pkg = try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = Directory.cwd(),
+ .sub_path = std.fs.path.dirname(test_runner) orelse "",
+ },
+ .root_src_path = std.fs.path.basename(test_runner),
+ .fully_qualified_name = "root",
+ });
- // copy package table from main_pkg to root_pkg
- pkg.table = try main_pkg.table.clone(gpa);
- break :test_pkg pkg;
- } else try Package.createWithDir(
- gpa,
- options.zig_lib_directory,
- null,
- "test_runner.zig",
- );
- errdefer test_pkg.destroy(gpa);
+ pkg.deps = try main_mod.deps.clone(arena);
+ break :test_mod pkg;
+ } else try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = options.zig_lib_directory,
+ },
+ .root_src_path = "test_runner.zig",
+ .fully_qualified_name = "root",
+ });
- break :root_pkg test_pkg;
- } else main_pkg;
- errdefer if (options.is_test) root_pkg.destroy(gpa);
+ break :root_mod test_mod;
+ } else main_mod;
- const compiler_rt_pkg = if (include_compiler_rt and options.output_mode == .Obj) compiler_rt_pkg: {
- break :compiler_rt_pkg try Package.createWithDir(
- gpa,
- options.zig_lib_directory,
- null,
- "compiler_rt.zig",
- );
+ const compiler_rt_mod = if (include_compiler_rt and options.output_mode == .Obj) compiler_rt_mod: {
+ break :compiler_rt_mod try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = options.zig_lib_directory,
+ },
+ .root_src_path = "compiler_rt.zig",
+ .fully_qualified_name = "compiler_rt",
+ });
} else null;
- errdefer if (compiler_rt_pkg) |p| p.destroy(gpa);
- try main_pkg.add(gpa, "builtin", builtin_pkg);
- try main_pkg.add(gpa, "root", root_pkg);
- try main_pkg.add(gpa, "std", std_pkg);
-
- if (compiler_rt_pkg) |p| {
- try main_pkg.add(gpa, "compiler_rt", p);
+ {
+ try main_mod.deps.ensureUnusedCapacity(arena, 4);
+ main_mod.deps.putAssumeCapacity("builtin", builtin_mod);
+ main_mod.deps.putAssumeCapacity("root", root_mod);
+ main_mod.deps.putAssumeCapacity("std", std_mod);
+ if (compiler_rt_mod) |m|
+ main_mod.deps.putAssumeCapacity("compiler_rt", m);
}
// Pre-open the directory handles for cached ZIR code so that it does not need
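
The implicit module dependencies are now wired up with a single fallible capacity reservation followed by infallible puts. A small sketch of that reserve-then-put pattern, using a plain string map rather than the commit's actual Deps type:

const std = @import("std");

test "reserve once, then put without failure" {
    const gpa = std.testing.allocator;
    var deps: std.StringArrayHashMapUnmanaged(u32) = .{};
    defer deps.deinit(gpa);

    // One fallible reservation up front; the puts below cannot fail.
    try deps.ensureUnusedCapacity(gpa, 3);
    deps.putAssumeCapacity("builtin", 1);
    deps.putAssumeCapacity("root", 2);
    deps.putAssumeCapacity("std", 3);
    try std.testing.expectEqual(@as(u32, 3), deps.get("std").?);
}
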
@@ -1395,8 +1401,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
module.* = .{
.gpa = gpa,
.comp = comp,
- .main_pkg = main_pkg,
- .root_pkg = root_pkg,
+ .main_mod = main_mod,
+ .root_mod = root_mod,
.zig_cache_artifact_directory = zig_cache_artifact_directory,
.global_zir_cache = global_zir_cache,
.local_zir_cache = local_zir_cache,
@@ -1664,7 +1670,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
errdefer bin_file.destroy();
comp.* = .{
.gpa = gpa,
- .arena_state = arena_allocator.state,
+ .arena = arena_allocator,
.zig_lib_directory = options.zig_lib_directory,
.local_cache_directory = options.local_cache_directory,
.global_cache_directory = options.global_cache_directory,
@@ -1982,7 +1988,8 @@ pub fn destroy(self: *Compilation) void {
if (self.owned_link_dir) |*dir| dir.close();
// This destroys `self`.
- self.arena_state.promote(gpa).deinit();
+ var arena_instance = self.arena;
+ arena_instance.deinit();
}
pub fn clearMiscFailures(comp: *Compilation) void {
@@ -2005,8 +2012,8 @@ fn restorePrevZigCacheArtifactDirectory(comp: *Compilation, directory: *Director
// This is only for cleanup purposes; Module.deinit calls close
// on the handle of zig_cache_artifact_directory.
if (comp.bin_file.options.module) |module| {
- const builtin_pkg = module.main_pkg.table.get("builtin").?;
- module.zig_cache_artifact_directory = builtin_pkg.root_src_directory;
+ const builtin_mod = module.main_mod.deps.get("builtin").?;
+ module.zig_cache_artifact_directory = builtin_mod.root.root_dir;
}
}
@@ -2148,8 +2155,8 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
// Make sure std.zig is inside the import_table. We unconditionally need
// it for start.zig.
- const std_pkg = module.main_pkg.table.get("std").?;
- _ = try module.importPkg(std_pkg);
+ const std_mod = module.main_mod.deps.get("std").?;
+ _ = try module.importPkg(std_mod);
// Normally we rely on importing std to in turn import the root source file
// in the start code, but when using the stage1 backend that won't happen,
@@ -2158,11 +2165,11 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
// Likewise, in the case of `zig test`, the test runner is the root source file,
// and so there is nothing to import the main file.
if (comp.bin_file.options.is_test) {
- _ = try module.importPkg(module.main_pkg);
+ _ = try module.importPkg(module.main_mod);
}
- if (module.main_pkg.table.get("compiler_rt")) |compiler_rt_pkg| {
- _ = try module.importPkg(compiler_rt_pkg);
+ if (module.main_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
+ _ = try module.importPkg(compiler_rt_mod);
}
// Put a work item in for every known source file to detect if
@@ -2185,13 +2192,13 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
}
}
- try comp.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
+ try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
if (comp.bin_file.options.is_test) {
- try comp.work_queue.writeItem(.{ .analyze_pkg = module.main_pkg });
+ try comp.work_queue.writeItem(.{ .analyze_mod = module.main_mod });
}
- if (module.main_pkg.table.get("compiler_rt")) |compiler_rt_pkg| {
- try comp.work_queue.writeItem(.{ .analyze_pkg = compiler_rt_pkg });
+ if (module.main_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
+ try comp.work_queue.writeItem(.{ .analyze_mod = compiler_rt_mod });
}
}
@@ -2420,19 +2427,17 @@ fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifes
comptime assert(link_hash_implementation_version == 10);
if (comp.bin_file.options.module) |mod| {
- const main_zig_file = try mod.main_pkg.root_src_directory.join(arena, &[_][]const u8{
- mod.main_pkg.root_src_path,
- });
+ const main_zig_file = try mod.main_mod.root.joinString(arena, mod.main_mod.root_src_path);
_ = try man.addFile(main_zig_file, null);
{
- var seen_table = std.AutoHashMap(*Package, void).init(arena);
+ var seen_table = std.AutoHashMap(*Package.Module, void).init(arena);
// Skip builtin.zig; it is useless as an input, and we don't want to have to
// write it before checking for a cache hit.
- const builtin_pkg = mod.main_pkg.table.get("builtin").?;
- try seen_table.put(builtin_pkg, {});
+ const builtin_mod = mod.main_mod.deps.get("builtin").?;
+ try seen_table.put(builtin_mod, {});
- try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = man });
+ try addModuleTableToCacheHash(&man.hash, &arena_allocator, mod.main_mod.deps, &seen_table, .{ .files = man });
}
// Synchronize with other matching comments: ZigOnlyHashStuff
@@ -2616,23 +2621,19 @@ fn reportMultiModuleErrors(mod: *Module) !void {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (ref) {
.import => |loc| blk: {
- const name = try loc.file_scope.pkg.getName(mod.gpa, mod.*);
- defer mod.gpa.free(name);
break :blk try Module.ErrorMsg.init(
mod.gpa,
loc,
"imported from module {s}",
- .{name},
+ .{loc.file_scope.mod.fully_qualified_name},
);
},
.root => |pkg| blk: {
- const name = try pkg.getName(mod.gpa, mod.*);
- defer mod.gpa.free(name);
break :blk try Module.ErrorMsg.init(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"root of module {s}",
- .{name},
+ .{pkg.fully_qualified_name},
);
},
};
@@ -3564,8 +3565,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
decl.analysis = .codegen_failure_retryable;
};
},
- .analyze_pkg => |pkg| {
- const named_frame = tracy.namedFrame("analyze_pkg");
+ .analyze_mod => |pkg| {
+ const named_frame = tracy.namedFrame("analyze_mod");
defer named_frame.end();
const module = comp.bin_file.options.module.?;
@@ -3904,17 +3905,12 @@ pub fn obtainWin32ResourceCacheManifest(comp: *const Compilation) Cache.Manifest
return man;
}
-test "cImport" {
- _ = cImport;
-}
-
pub const CImportResult = struct {
out_zig_path: []u8,
cache_hit: bool,
errors: std.zig.ErrorBundle,
pub fn deinit(result: *CImportResult, gpa: std.mem.Allocator) void {
- gpa.free(result.out_zig_path);
result.errors.deinit(gpa);
}
};
@@ -4059,7 +4055,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
};
}
- const out_zig_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
+ const out_zig_path = try comp.local_cache_directory.join(comp.arena.allocator(), &.{
"o", &digest, cimport_zig_basename,
});
if (comp.verbose_cimport) {
@@ -4214,17 +4210,9 @@ fn reportRetryableAstGenError(
},
};
- const err_msg = if (file.pkg.root_src_directory.path) |dir_path|
- try Module.ErrorMsg.create(
- gpa,
- src_loc,
- "unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
- .{ dir_path, file.sub_file_path, @errorName(err) },
- )
- else
- try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
- file.sub_file_path, @errorName(err),
- });
+ const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
+ file.mod.root, file.sub_file_path, @errorName(err),
+ });
errdefer err_msg.destroy(gpa);
{
@@ -4244,17 +4232,10 @@ fn reportRetryableEmbedFileError(
const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod);
- const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
- try Module.ErrorMsg.create(
- gpa,
- src_loc,
- "unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
- .{ dir_path, embed_file.sub_file_path, @errorName(err) },
- )
- else
- try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
- embed_file.sub_file_path, @errorName(err),
- });
+ const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
+ embed_file.mod.root, embed_file.sub_file_path, @errorName(err),
+ });
+
errdefer err_msg.destroy(gpa);
{
@@ -6377,13 +6358,13 @@ fn buildOutputFromZig(
const tracy_trace = trace(@src());
defer tracy_trace.end();
- std.debug.assert(output_mode != .Exe);
+ assert(output_mode != .Exe);
- var main_pkg: Package = .{
- .root_src_directory = comp.zig_lib_directory,
+ var main_mod: Package.Module = .{
+ .root = .{ .root_dir = comp.zig_lib_directory },
.root_src_path = src_basename,
+ .fully_qualified_name = "root",
};
- defer main_pkg.deinitTable(comp.gpa);
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const target = comp.getTarget();
const bin_basename = try std.zig.binNameAlloc(comp.gpa, .{
@@ -6404,7 +6385,7 @@ fn buildOutputFromZig(
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = &main_pkg,
+ .main_mod = &main_mod,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
@@ -6481,7 +6462,7 @@ pub fn build_crt_file(
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
diff --git a/src/Module.zig b/src/Module.zig
index 8c4035cc9a..41f4ec2b41 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -55,10 +55,10 @@ comp: *Compilation,
/// Where build artifacts and incremental compilation metadata serialization go.
zig_cache_artifact_directory: Compilation.Directory,
/// Pointer to externally managed resource.
-root_pkg: *Package,
-/// Normally, `main_pkg` and `root_pkg` are the same. The exception is `zig test`, in which
-/// `root_pkg` is the test runner, and `main_pkg` is the user's source file which has the tests.
-main_pkg: *Package,
+root_mod: *Package.Module,
+/// Normally, `main_mod` and `root_mod` are the same. The exception is `zig test`, in which
+/// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
+main_mod: *Package.Module,
sema_prog_node: std.Progress.Node = undefined,
/// Used by AstGen worker to load and store ZIR cache.
@@ -973,8 +973,8 @@ pub const File = struct {
tree: Ast,
/// Whether this is populated or not depends on `zir_loaded`.
zir: Zir,
- /// Package that this file is a part of, managed externally.
- pkg: *Package,
+ /// Module that this file is a part of, managed externally.
+ mod: *Package.Module,
/// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen.
multi_pkg: bool = false,
/// List of references to this file, used for multi-package errors.
@@ -998,8 +998,8 @@ pub const File = struct {
pub const Reference = union(enum) {
/// The file is imported directly (i.e. not as a package) with @import.
import: SrcLoc,
- /// The file is the root of a package.
- root: *Package,
+ /// The file is the root of a module.
+ root: *Package.Module,
};
pub fn unload(file: *File, gpa: Allocator) void {
@@ -1058,14 +1058,9 @@ pub const File = struct {
.stat = file.stat,
};
- const root_dir_path = file.pkg.root_src_directory.path orelse ".";
- log.debug("File.getSource, not cached. pkgdir={s} sub_file_path={s}", .{
- root_dir_path, file.sub_file_path,
- });
-
// Keep track of inode, file size, mtime, hash so we can detect which files
// have been modified when an incremental update is requested.
- var f = try file.pkg.root_src_directory.handle.openFile(file.sub_file_path, .{});
+ var f = try file.mod.root.openFile(file.sub_file_path, .{});
defer f.close();
const stat = try f.stat();
@@ -1134,14 +1129,12 @@ pub const File = struct {
return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start);
}
- /// Returns the full path to this file relative to its package.
pub fn fullPath(file: File, ally: Allocator) ![]u8 {
- return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
+ return file.mod.root.joinString(ally, file.sub_file_path);
}
- /// Returns the full path to this file relative to its package.
pub fn fullPathZ(file: File, ally: Allocator) ![:0]u8 {
- return file.pkg.root_src_directory.joinZ(ally, &[_][]const u8{file.sub_file_path});
+ return file.mod.root.joinStringZ(ally, file.sub_file_path);
}
pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
@@ -1181,10 +1174,10 @@ pub const File = struct {
}
const pkg = switch (ref) {
- .import => |loc| loc.file_scope.pkg,
+ .import => |loc| loc.file_scope.mod,
.root => |pkg| pkg,
};
- if (pkg != file.pkg) file.multi_pkg = true;
+ if (pkg != file.mod) file.multi_pkg = true;
}
/// Mark this file and every file referenced by it as multi_pkg and report an
@@ -1226,7 +1219,7 @@ pub const EmbedFile = struct {
bytes: [:0]const u8,
stat: Cache.File.Stat,
/// Package that this file is a part of, managed externally.
- pkg: *Package,
+ mod: *Package.Module,
/// The Decl that was created from the `@embedFile` to own this resource.
/// This is how zig knows what other Decl objects to invalidate if the file
/// changes on disk.
@@ -2542,28 +2535,6 @@ pub fn deinit(mod: *Module) void {
}
mod.deletion_set.deinit(gpa);
-
- // The callsite of `Compilation.create` owns the `main_pkg`, however
- // Module owns the builtin and std packages that it adds.
- if (mod.main_pkg.table.fetchRemove("builtin")) |kv| {
- gpa.free(kv.key);
- kv.value.destroy(gpa);
- }
- if (mod.main_pkg.table.fetchRemove("std")) |kv| {
- gpa.free(kv.key);
- // It's possible for main_pkg to be std when running 'zig test'! In this case, we must not
- // destroy it, since it would lead to a double-free.
- if (kv.value != mod.main_pkg) {
- kv.value.destroy(gpa);
- }
- }
- if (mod.main_pkg.table.fetchRemove("root")) |kv| {
- gpa.free(kv.key);
- }
- if (mod.root_pkg != mod.main_pkg) {
- mod.root_pkg.destroy(gpa);
- }
-
mod.compile_log_text.deinit(gpa);
mod.zig_cache_artifact_directory.handle.close();
@@ -2710,18 +2681,19 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
const gpa = mod.gpa;
// In any case we need to examine the stat of the file to determine the course of action.
- var source_file = try file.pkg.root_src_directory.handle.openFile(file.sub_file_path, .{});
+ var source_file = try file.mod.root.openFile(file.sub_file_path, .{});
defer source_file.close();
const stat = try source_file.stat();
- const want_local_cache = file.pkg == mod.main_pkg;
+ const want_local_cache = file.mod == mod.main_mod;
const digest = hash: {
var path_hash: Cache.HashHelper = .{};
path_hash.addBytes(build_options.version);
path_hash.add(builtin.zig_backend);
if (!want_local_cache) {
- path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
+ path_hash.addOptionalBytes(file.mod.root.root_dir.path);
+ path_hash.addBytes(file.mod.root.sub_path);
}
path_hash.addBytes(file.sub_file_path);
break :hash path_hash.final();
@@ -2946,10 +2918,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
},
};
cache_file.writevAll(&iovecs) catch |err| {
- const pkg_path = file.pkg.root_src_directory.path orelse ".";
- const cache_path = cache_directory.path orelse ".";
- log.warn("unable to write cached ZIR code for {s}/{s} to {s}/{s}: {s}", .{
- pkg_path, file.sub_file_path, cache_path, &digest, @errorName(err),
+ log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{
+ file.mod.root, file.sub_file_path, cache_directory, &digest, @errorName(err),
});
};
@@ -3154,37 +3124,27 @@ pub fn populateBuiltinFile(mod: *Module) !void {
defer tracy.end();
const comp = mod.comp;
- const pkg_and_file = blk: {
+ const builtin_mod, const file = blk: {
comp.mutex.lock();
defer comp.mutex.unlock();
- const builtin_pkg = mod.main_pkg.table.get("builtin").?;
- const result = try mod.importPkg(builtin_pkg);
- break :blk .{
- .file = result.file,
- .pkg = builtin_pkg,
- };
+ const builtin_mod = mod.main_mod.deps.get("builtin").?;
+ const result = try mod.importPkg(builtin_mod);
+ break :blk .{ builtin_mod, result.file };
};
- const file = pkg_and_file.file;
- const builtin_pkg = pkg_and_file.pkg;
const gpa = mod.gpa;
file.source = try comp.generateBuiltinZigSource(gpa);
file.source_loaded = true;
- if (builtin_pkg.root_src_directory.handle.statFile(builtin_pkg.root_src_path)) |stat| {
+ if (builtin_mod.root.statFile(builtin_mod.root_src_path)) |stat| {
if (stat.size != file.source.len) {
- const full_path = try builtin_pkg.root_src_directory.join(gpa, &.{
- builtin_pkg.root_src_path,
- });
- defer gpa.free(full_path);
-
log.warn(
- "the cached file '{s}' had the wrong size. Expected {d}, found {d}. " ++
+ "the cached file '{}{s}' had the wrong size. Expected {d}, found {d}. " ++
"Overwriting with correct file contents now",
- .{ full_path, file.source.len, stat.size },
+ .{ builtin_mod.root, builtin_mod.root_src_path, file.source.len, stat.size },
);
- try writeBuiltinFile(file, builtin_pkg);
+ try writeBuiltinFile(file, builtin_mod);
} else {
file.stat = .{
.size = stat.size,
@@ -3198,7 +3158,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
error.PipeBusy => unreachable, // it's not a pipe
error.WouldBlock => unreachable, // not asking for non-blocking I/O
- error.FileNotFound => try writeBuiltinFile(file, builtin_pkg),
+ error.FileNotFound => try writeBuiltinFile(file, builtin_mod),
else => |e| return e,
}
@@ -3212,8 +3172,8 @@ pub fn populateBuiltinFile(mod: *Module) !void {
file.status = .success_zir;
}
-fn writeBuiltinFile(file: *File, builtin_pkg: *Package) !void {
- var af = try builtin_pkg.root_src_directory.handle.atomicFile(builtin_pkg.root_src_path, .{});
+fn writeBuiltinFile(file: *File, builtin_mod: *Package.Module) !void {
+ var af = try builtin_mod.root.atomicFile(builtin_mod.root_src_path, .{});
defer af.deinit();
try af.file.writeAll(file.source);
try af.finish();
@@ -3609,7 +3569,8 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
}
}
-pub fn semaPkg(mod: *Module, pkg: *Package) !void {
+/// https://github.com/ziglang/zig/issues/14307
+pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void {
const file = (try mod.importPkg(pkg)).file;
return mod.semaFile(file);
}
@@ -3711,13 +3672,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
return error.AnalysisFail;
};
- const resolved_path = std.fs.path.resolve(
- gpa,
- if (file.pkg.root_src_directory.path) |pkg_path|
- &[_][]const u8{ pkg_path, file.sub_file_path }
- else
- &[_][]const u8{file.sub_file_path},
- ) catch |err| {
+ const resolved_path = std.fs.path.resolve(gpa, &.{
+ file.mod.root.root_dir.path orelse ".",
+ file.mod.root.sub_path,
+ file.sub_file_path,
+ }) catch |err| {
try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
@@ -3748,8 +3707,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// TODO: figure out how this works under incremental changes to builtin.zig!
const builtin_type_target_index: InternPool.Index = blk: {
- const std_mod = mod.main_pkg.table.get("std").?;
- if (decl.getFileScope(mod).pkg != std_mod) break :blk .none;
+ const std_mod = mod.main_mod.deps.get("std").?;
+ if (decl.getFileScope(mod).mod != std_mod) break :blk .none;
// We're in the std module.
const std_file = (try mod.importPkg(std_mod)).file;
const std_decl = mod.declPtr(std_file.root_decl.unwrap().?);
@@ -4042,14 +4001,17 @@ pub const ImportFileResult = struct {
is_pkg: bool,
};
-pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
+/// https://github.com/ziglang/zig/issues/14307
+pub fn importPkg(mod: *Module, pkg: *Package.Module) !ImportFileResult {
const gpa = mod.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
- const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
- pkg.root_src_directory.path orelse ".", pkg.root_src_path,
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ pkg.root.root_dir.path orelse ".",
+ pkg.root.sub_path,
+ pkg.root_src_path,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
@@ -4083,7 +4045,7 @@ pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
- .pkg = pkg,
+ .mod = pkg,
.root_decl = .none,
};
try new_file.addReference(mod.*, .{ .root = pkg });
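
As the comments in importPkg note, the import table is keyed by the fully resolved path so that different relative spellings of the same file collapse to one entry. A quick illustration of that canonicalization; the paths are made up:

const std = @import("std");

test "resolved path is a canonical import-table key" {
    const gpa = std.testing.allocator;
    // Two spellings of the same file...
    const a = try std.fs.path.resolve(gpa, &.{ "lib", "std", "..", "std", "std.zig" });
    defer gpa.free(a);
    const b = try std.fs.path.resolve(gpa, &.{ "lib", "std", "std.zig" });
    defer gpa.free(b);
    // ...resolve to one key.
    try std.testing.expectEqualStrings(a, b);
}
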
@@ -4100,29 +4062,33 @@ pub fn importFile(
import_string: []const u8,
) !ImportFileResult {
if (std.mem.eql(u8, import_string, "std")) {
- return mod.importPkg(mod.main_pkg.table.get("std").?);
+ return mod.importPkg(mod.main_mod.deps.get("std").?);
}
if (std.mem.eql(u8, import_string, "builtin")) {
- return mod.importPkg(mod.main_pkg.table.get("builtin").?);
+ return mod.importPkg(mod.main_mod.deps.get("builtin").?);
}
if (std.mem.eql(u8, import_string, "root")) {
- return mod.importPkg(mod.root_pkg);
+ return mod.importPkg(mod.root_mod);
}
- if (cur_file.pkg.table.get(import_string)) |pkg| {
+ if (cur_file.mod.deps.get(import_string)) |pkg| {
return mod.importPkg(pkg);
}
if (!mem.endsWith(u8, import_string, ".zig")) {
- return error.PackageNotFound;
+ return error.ModuleNotFound;
}
const gpa = mod.gpa;
// The resolved path is used as the key in the import table, to detect if
// an import refers to the same as another, despite different relative paths
// or differently mapped package names.
- const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
- const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
- cur_pkg_dir_path, cur_file.sub_file_path, "..", import_string,
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ cur_file.sub_file_path,
+ "..",
+ import_string,
});
+
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
@@ -4137,7 +4103,10 @@ pub fn importFile(
const new_file = try gpa.create(File);
errdefer gpa.destroy(new_file);
- const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
+ const resolved_root_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ });
defer gpa.free(resolved_root_path);
const sub_file_path = p: {
@@ -4151,7 +4120,7 @@ pub fn importFile(
{
break :p try gpa.dupe(u8, resolved_path);
}
- return error.ImportOutsidePkgPath;
+ return error.ImportOutsideModulePath;
};
errdefer gpa.free(sub_file_path);
@@ -4171,7 +4140,7 @@ pub fn importFile(
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
- .pkg = cur_file.pkg,
+ .mod = cur_file.mod,
.root_decl = .none,
};
return ImportFileResult{
@@ -4184,9 +4153,11 @@ pub fn importFile(
pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*EmbedFile {
const gpa = mod.gpa;
- if (cur_file.pkg.table.get(import_string)) |pkg| {
- const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
- pkg.root_src_directory.path orelse ".", pkg.root_src_path,
+ if (cur_file.mod.deps.get(import_string)) |pkg| {
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ pkg.root.root_dir.path orelse ".",
+ pkg.root.sub_path,
+ pkg.root_src_path,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
@@ -4203,10 +4174,14 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
// The resolved path is used as the key in the table, to detect if a file
// refers to the same as another, despite different relative paths.
- const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
- const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
- cur_pkg_dir_path, cur_file.sub_file_path, "..", import_string,
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ cur_file.sub_file_path,
+ "..",
+ import_string,
});
+
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
@@ -4214,7 +4189,10 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
errdefer assert(mod.embed_table.remove(resolved_path));
if (gop.found_existing) return gop.value_ptr.*;
- const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
+ const resolved_root_path = try std.fs.path.resolve(gpa, &.{
+ cur_file.mod.root.root_dir.path orelse ".",
+ cur_file.mod.root.sub_path,
+ });
defer gpa.free(resolved_root_path);
const sub_file_path = p: {
@@ -4228,16 +4206,17 @@ pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*Emb
{
break :p try gpa.dupe(u8, resolved_path);
}
- return error.ImportOutsidePkgPath;
+ return error.ImportOutsideModulePath;
};
errdefer gpa.free(sub_file_path);
- return newEmbedFile(mod, cur_file.pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
+ return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, &keep_resolved_path, gop);
}
+/// https://github.com/ziglang/zig/issues/14307
fn newEmbedFile(
mod: *Module,
- pkg: *Package,
+ pkg: *Package.Module,
sub_file_path: []const u8,
resolved_path: []const u8,
keep_resolved_path: *bool,
@@ -4248,7 +4227,7 @@ fn newEmbedFile(
const new_file = try gpa.create(EmbedFile);
errdefer gpa.destroy(new_file);
- var file = try pkg.root_src_directory.handle.openFile(sub_file_path, .{});
+ var file = try pkg.root.openFile(sub_file_path, .{});
defer file.close();
const actual_stat = try file.stat();
@@ -4275,14 +4254,14 @@ fn newEmbedFile(
.sub_file_path = sub_file_path,
.bytes = bytes,
.stat = stat,
- .pkg = pkg,
+ .mod = pkg,
.owner_decl = undefined, // Set by Sema immediately after this function returns.
};
return new_file;
}
pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
- var file = try embed_file.pkg.root_src_directory.handle.openFile(embed_file.sub_file_path, .{});
+ var file = try embed_file.mod.root.openFile(embed_file.sub_file_path, .{});
defer file.close();
const stat = try file.stat();
@@ -4455,21 +4434,21 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
gop.key_ptr.* = new_decl_index;
// Exported decls, comptime decls, usingnamespace decls, and
// test decls if in test mode, get analyzed.
- const decl_pkg = namespace.file_scope.pkg;
+ const decl_mod = namespace.file_scope.mod;
const want_analysis = is_exported or switch (decl_name_index) {
0 => true, // comptime or usingnamespace decl
1 => blk: {
// test decl with no name. Skip the part where we check against
// the test name filter.
if (!comp.bin_file.options.is_test) break :blk false;
- if (decl_pkg != mod.main_pkg) break :blk false;
+ if (decl_mod != mod.main_mod) break :blk false;
try mod.test_functions.put(gpa, new_decl_index, {});
break :blk true;
},
else => blk: {
if (!is_named_test) break :blk false;
if (!comp.bin_file.options.is_test) break :blk false;
- if (decl_pkg != mod.main_pkg) break :blk false;
+ if (decl_mod != mod.main_mod) break :blk false;
if (comp.test_filter) |test_filter| {
if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) {
break :blk false;
@@ -5596,8 +5575,8 @@ pub fn populateTestFunctions(
) !void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
- const builtin_pkg = mod.main_pkg.table.get("builtin").?;
- const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file;
+ const builtin_mod = mod.main_mod.deps.get("builtin").?;
+ const builtin_file = (mod.importPkg(builtin_mod) catch unreachable).file;
const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?);
const builtin_namespace = mod.namespacePtr(root_decl.src_namespace);
const test_functions_str = try ip.getOrPutString(gpa, "test_functions");
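
populateBuiltinFile now unpacks two values from a labeled block in one statement. A minimal sketch of that tuple destructuring form, assuming a Zig compiler recent enough to support it (this commit itself relies on it):

const std = @import("std");

test "destructure a tuple returned from a labeled block" {
    const first, const second = blk: {
        // The block yields an anonymous tuple; the assignment unpacks it.
        break :blk .{ @as(u32, 40), @as(u32, 2) };
    };
    try std.testing.expectEqual(@as(u32, 42), first + second);
}
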
diff --git a/src/Package.zig b/src/Package.zig
index 14052e3de4..c36830d599 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -1,1349 +1,153 @@
-const Package = @This();
-
-const builtin = @import("builtin");
-const std = @import("std");
-const fs = std.fs;
-const mem = std.mem;
-const Allocator = mem.Allocator;
-const ascii = std.ascii;
-const assert = std.debug.assert;
-const log = std.log.scoped(.package);
-const main = @import("main.zig");
-const ThreadPool = std.Thread.Pool;
-
-const Compilation = @import("Compilation.zig");
-const Module = @import("Module.zig");
-const Cache = std.Build.Cache;
-const build_options = @import("build_options");
-const git = @import("git.zig");
-const computePackageHash = @import("Package/hash.zig").compute;
-
-pub const Manifest = @import("Manifest.zig");
-pub const Table = std.StringHashMapUnmanaged(*Package);
-
-root_src_directory: Compilation.Directory,
-/// Relative to `root_src_directory`. May contain path separators.
-root_src_path: []const u8,
-/// The dependency table of this module. Shared dependencies such as 'std', 'builtin', and 'root'
-/// are not specified in every dependency table, but instead only in the table of `main_pkg`.
-/// `Module.importFile` is responsible for detecting these names and using the correct package.
-table: Table = .{},
-/// Whether to free `root_src_directory` on `destroy`.
-root_src_directory_owned: bool = false,
-
-/// Allocate a Package. No references to the slices passed are kept.
-pub fn create(
- gpa: Allocator,
- /// Null indicates the current working directory
- root_src_dir_path: ?[]const u8,
- /// Relative to root_src_dir_path
- root_src_path: []const u8,
-) !*Package {
- const ptr = try gpa.create(Package);
- errdefer gpa.destroy(ptr);
-
- const owned_dir_path = if (root_src_dir_path) |p| try gpa.dupe(u8, p) else null;
- errdefer if (owned_dir_path) |p| gpa.free(p);
-
- const owned_src_path = try gpa.dupe(u8, root_src_path);
- errdefer gpa.free(owned_src_path);
-
- ptr.* = .{
- .root_src_directory = .{
- .path = owned_dir_path,
- .handle = if (owned_dir_path) |p| try fs.cwd().openDir(p, .{}) else fs.cwd(),
- },
- .root_src_path = owned_src_path,
- .root_src_directory_owned = true,
- };
-
- return ptr;
-}
-
-pub fn createWithDir(
- gpa: Allocator,
- directory: Compilation.Directory,
- /// Relative to `directory`. If null, means `directory` is the root src dir
- /// and is owned externally.
- root_src_dir_path: ?[]const u8,
- /// Relative to root_src_dir_path
- root_src_path: []const u8,
-) !*Package {
- const ptr = try gpa.create(Package);
- errdefer gpa.destroy(ptr);
-
- const owned_src_path = try gpa.dupe(u8, root_src_path);
- errdefer gpa.free(owned_src_path);
-
- if (root_src_dir_path) |p| {
- const owned_dir_path = try directory.join(gpa, &[1][]const u8{p});
- errdefer gpa.free(owned_dir_path);
-
- ptr.* = .{
- .root_src_directory = .{
- .path = owned_dir_path,
- .handle = try directory.handle.openDir(p, .{}),
- },
- .root_src_directory_owned = true,
- .root_src_path = owned_src_path,
- };
- } else {
- ptr.* = .{
- .root_src_directory = directory,
- .root_src_directory_owned = false,
- .root_src_path = owned_src_path,
+pub const Module = @import("Package/Module.zig");
+pub const Fetch = @import("Package/Fetch.zig");
+pub const build_zig_basename = "build.zig";
+pub const Manifest = @import("Package/Manifest.zig");
+
+pub const Path = struct {
+ root_dir: Cache.Directory,
+ /// The path, relative to the root dir, that this `Path` represents.
+ /// Empty string means the root_dir is the path.
+ sub_path: []const u8 = "",
+
+ pub fn clone(p: Path, arena: Allocator) Allocator.Error!Path {
+ return .{
+ .root_dir = try p.root_dir.clone(arena),
+ .sub_path = try arena.dupe(u8, p.sub_path),
};
}
- return ptr;
-}
-/// Free all memory associated with this package. It does not destroy any packages
-/// inside its table; the caller is responsible for calling destroy() on them.
-pub fn destroy(pkg: *Package, gpa: Allocator) void {
- gpa.free(pkg.root_src_path);
-
- if (pkg.root_src_directory_owned) {
- // If root_src_directory.path is null then the handle is the cwd()
- // which shouldn't be closed.
- if (pkg.root_src_directory.path) |p| {
- gpa.free(p);
- pkg.root_src_directory.handle.close();
- }
+ pub fn cwd() Path {
+ return .{ .root_dir = Cache.Directory.cwd() };
}
- pkg.deinitTable(gpa);
- gpa.destroy(pkg);
-}
-
-/// Only frees memory associated with the table.
-pub fn deinitTable(pkg: *Package, gpa: Allocator) void {
- pkg.table.deinit(gpa);
-}
-
-pub fn add(pkg: *Package, gpa: Allocator, name: []const u8, package: *Package) !void {
- try pkg.table.ensureUnusedCapacity(gpa, 1);
- const name_dupe = try gpa.dupe(u8, name);
- pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
-}
-
-/// Compute a readable name for the package. The returned name should be freed from gpa. This
-/// function is very slow, as it traverses the whole package hierarchy to find a path to this
-/// package. It should only be used for error output.
-pub fn getName(target: *const Package, gpa: Allocator, mod: Module) ![]const u8 {
- // we'll do a breadth-first search from the root module to try and find a short name for this
- // module, using a DoublyLinkedList of module/parent pairs. note that the "parent" there is
- // just the first-found shortest path - a module may be children of arbitrarily many other
- // modules. This path may vary between executions due to hashmap iteration order, but that
- // doesn't matter too much.
- var node_arena = std.heap.ArenaAllocator.init(gpa);
- defer node_arena.deinit();
- const Parented = struct {
- parent: ?*const @This(),
- mod: *const Package,
- };
- const Queue = std.DoublyLinkedList(Parented);
- var to_check: Queue = .{};
-
- {
- const new = try node_arena.allocator().create(Queue.Node);
- new.* = .{ .data = .{ .parent = null, .mod = mod.root_pkg } };
- to_check.prepend(new);
- }
-
- if (mod.main_pkg != mod.root_pkg) {
- const new = try node_arena.allocator().create(Queue.Node);
- // TODO: once #12201 is resolved, we may want a way of indicating a different name for this
- new.* = .{ .data = .{ .parent = null, .mod = mod.main_pkg } };
- to_check.prepend(new);
- }
-
- // set of modules we've already checked to prevent loops
- var checked = std.AutoHashMap(*const Package, void).init(gpa);
- defer checked.deinit();
-
- const linked = while (to_check.pop()) |node| {
- const check = &node.data;
-
- if (checked.contains(check.mod)) continue;
- try checked.put(check.mod, {});
-
- if (check.mod == target) break check;
-
- var it = check.mod.table.iterator();
- while (it.next()) |kv| {
- var new = try node_arena.allocator().create(Queue.Node);
- new.* = .{ .data = .{
- .parent = check,
- .mod = kv.value_ptr.*,
- } };
- to_check.prepend(new);
- }
- } else {
- // this can happen for e.g. @cImport packages
- return gpa.dupe(u8, "<unnamed>");
- };
-
- // we found a path to the module! unfortunately, we can only traverse *up* it, so we have to put
- // all the names into a buffer so we can then print them in order.
- var names = std.ArrayList([]const u8).init(gpa);
- defer names.deinit();
-
- var cur: *const Parented = linked;
- while (cur.parent) |parent| : (cur = parent) {
- // find cur's name in parent
- var it = parent.mod.table.iterator();
- const name = while (it.next()) |kv| {
- if (kv.value_ptr.* == cur.mod) {
- break kv.key_ptr.*;
- }
- } else unreachable;
- try names.append(name);
- }
-
- // finally, print the names into a buffer!
- var buf = std.ArrayList(u8).init(gpa);
- defer buf.deinit();
- try buf.writer().writeAll("root");
- var i: usize = names.items.len;
- while (i > 0) {
- i -= 1;
- try buf.writer().print(".{s}", .{names.items[i]});
+ pub fn join(p: Path, arena: Allocator, sub_path: []const u8) Allocator.Error!Path {
+ if (sub_path.len == 0) return p;
+ const parts: []const []const u8 =
+ if (p.sub_path.len == 0) &.{sub_path} else &.{ p.sub_path, sub_path };
+ return .{
+ .root_dir = p.root_dir,
+ .sub_path = try fs.path.join(arena, parts),
+ };
}
- return buf.toOwnedSlice();
-}
-
-pub const build_zig_basename = "build.zig";
-
-/// Fetches a package and all of its dependencies recursively. Writes the
-/// corresponding datastructures for the build runner into `dependencies_source`.
-pub fn fetchAndAddDependencies(
- pkg: *Package,
- deps_pkg: *Package,
- arena: Allocator,
- thread_pool: *ThreadPool,
- http_client: *std.http.Client,
- directory: Compilation.Directory,
- global_cache_directory: Compilation.Directory,
- local_cache_directory: Compilation.Directory,
- dependencies_source: *std.ArrayList(u8),
- error_bundle: *std.zig.ErrorBundle.Wip,
- all_modules: *AllModules,
- root_prog_node: *std.Progress.Node,
- /// null for the root package
- this_hash: ?[]const u8,
-) !void {
- const max_bytes = 10 * 1024 * 1024;
- const gpa = thread_pool.allocator;
- const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
- arena,
- Manifest.basename,
- max_bytes,
- null,
- 1,
- 0,
- ) catch |err| switch (err) {
- error.FileNotFound => {
- // Handle the same as no dependencies.
- if (this_hash) |hash| {
- try dependencies_source.writer().print(
- \\ pub const {} = struct {{
- \\ pub const build_root = "{}";
- \\ pub const build_zig = @import("{}");
- \\ pub const deps: []const struct {{ []const u8, []const u8 }} = &.{{}};
- \\ }};
- \\
- , .{
- std.zig.fmtId(hash),
- std.zig.fmtEscapes(pkg.root_src_directory.path.?),
- std.zig.fmtEscapes(hash),
- });
- } else {
- try dependencies_source.writer().writeAll(
- \\pub const packages = struct {};
- \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{};
- \\
- );
- }
- return;
- },
- else => |e| return e,
- };
-
- var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
- defer ast.deinit(gpa);
-
- if (ast.errors.len > 0) {
- const file_path = try directory.join(arena, &.{Manifest.basename});
- try main.putAstErrorsIntoBundle(gpa, ast, file_path, error_bundle);
- return error.PackageFetchFailed;
+ pub fn resolvePosix(p: Path, arena: Allocator, sub_path: []const u8) Allocator.Error!Path {
+ if (sub_path.len == 0) return p;
+ return .{
+ .root_dir = p.root_dir,
+ .sub_path = try fs.path.resolvePosix(arena, &.{ p.sub_path, sub_path }),
+ };
}
- var manifest = try Manifest.parse(gpa, ast);
- defer manifest.deinit(gpa);
-
- if (manifest.errors.len > 0) {
- const file_path = try directory.join(arena, &.{Manifest.basename});
- for (manifest.errors) |msg| {
- const str = try error_bundle.addString(msg.msg);
- try Report.addErrorMessage(&ast, file_path, error_bundle, 0, str, msg.tok, msg.off);
- }
- return error.PackageFetchFailed;
+ pub fn joinString(p: Path, allocator: Allocator, sub_path: []const u8) Allocator.Error![]u8 {
+ const parts: []const []const u8 =
+ if (p.sub_path.len == 0) &.{sub_path} else &.{ p.sub_path, sub_path };
+ return p.root_dir.join(allocator, parts);
}
- const report: Report = .{
- .ast = &ast,
- .directory = directory,
- .error_bundle = error_bundle,
- };
-
- for (manifest.dependencies.values()) |dep| {
- // If the hash is invalid, let errors happen later
- // We only want to add these for progress reporting
- const hash = dep.hash orelse continue;
- if (hash.len != hex_multihash_len) continue;
- const gop = try all_modules.getOrPut(gpa, hash[0..hex_multihash_len].*);
- if (!gop.found_existing) gop.value_ptr.* = null;
+ pub fn joinStringZ(p: Path, allocator: Allocator, sub_path: []const u8) Allocator.Error![:0]u8 {
+ const parts: []const []const u8 =
+ if (p.sub_path.len == 0) &.{sub_path} else &.{ p.sub_path, sub_path };
+ return p.root_dir.joinZ(allocator, parts);
}
- root_prog_node.setEstimatedTotalItems(all_modules.count());
-
- if (this_hash == null) {
- try dependencies_source.writer().writeAll("pub const packages = struct {\n");
+ pub fn openFile(
+ p: Path,
+ sub_path: []const u8,
+ flags: fs.File.OpenFlags,
+ ) !fs.File {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const joined_path = if (p.sub_path.len == 0) sub_path else p: {
+ break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
+ p.sub_path, sub_path,
+ }) catch return error.NameTooLong;
+ };
+ return p.root_dir.handle.openFile(joined_path, flags);
}
- for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, *dep| {
- var fetch_location = try FetchLocation.init(gpa, dep.*, directory, report);
- defer fetch_location.deinit(gpa);
-
- // Directories do not provide a hash in build.zig.zon.
- // Hash the path to the module rather than its contents.
- const sub_mod, const found_existing = if (fetch_location == .directory)
- try getDirectoryModule(gpa, fetch_location, directory, all_modules, dep, report)
- else
- try getCachedPackage(
- gpa,
- global_cache_directory,
- dep.*,
- all_modules,
- root_prog_node,
- ) orelse .{
- try fetchAndUnpack(
- fetch_location,
- thread_pool,
- http_client,
- directory,
- global_cache_directory,
- dep.*,
- report,
- all_modules,
- root_prog_node,
- name,
- ),
- false,
- };
-
- assert(dep.hash != null);
-
- switch (sub_mod) {
- .zig_pkg => |sub_pkg| {
- if (!found_existing) {
- try sub_pkg.fetchAndAddDependencies(
- deps_pkg,
- arena,
- thread_pool,
- http_client,
- sub_pkg.root_src_directory,
- global_cache_directory,
- local_cache_directory,
- dependencies_source,
- error_bundle,
- all_modules,
- root_prog_node,
- dep.hash.?,
- );
- }
-
- try pkg.add(gpa, name, sub_pkg);
- if (deps_pkg.table.get(dep.hash.?)) |other_sub| {
- // This should be the same package (and hence module) since it's the same hash
- // TODO: dedup multiple versions of the same package
- assert(other_sub == sub_pkg);
- } else {
- try deps_pkg.add(gpa, dep.hash.?, sub_pkg);
- }
- },
- .non_zig_pkg => |sub_pkg| {
- if (!found_existing) {
- try dependencies_source.writer().print(
- \\ pub const {} = struct {{
- \\ pub const build_root = "{}";
- \\ pub const deps: []const struct {{ []const u8, []const u8 }} = &.{{}};
- \\ }};
- \\
- , .{
- std.zig.fmtId(dep.hash.?),
- std.zig.fmtEscapes(sub_pkg.root_src_directory.path.?),
- });
- }
- },
- }
+ pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.OpenDirOptions) !fs.Dir {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const joined_path = if (p.sub_path.len == 0) sub_path else p: {
+ break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
+ p.sub_path, sub_path,
+ }) catch return error.NameTooLong;
+ };
+ return p.root_dir.handle.makeOpenPath(joined_path, opts);
}
- if (this_hash) |hash| {
- try dependencies_source.writer().print(
- \\ pub const {} = struct {{
- \\ pub const build_root = "{}";
- \\ pub const build_zig = @import("{}");
- \\ pub const deps: []const struct {{ []const u8, []const u8 }} = &.{{
- \\
- , .{
- std.zig.fmtId(hash),
- std.zig.fmtEscapes(pkg.root_src_directory.path.?),
- std.zig.fmtEscapes(hash),
- });
- for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
- try dependencies_source.writer().print(
- " .{{ \"{}\", \"{}\" }},\n",
- .{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(dep.hash.?) },
- );
- }
- try dependencies_source.writer().writeAll(
- \\ };
- \\ };
- \\
- );
- } else {
- try dependencies_source.writer().writeAll(
- \\};
- \\
- \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{
- \\
- );
- for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
- try dependencies_source.writer().print(
- " .{{ \"{}\", \"{}\" }},\n",
- .{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(dep.hash.?) },
- );
- }
- try dependencies_source.writer().writeAll("};\n");
+ pub fn statFile(p: Path, sub_path: []const u8) !fs.Dir.Stat {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const joined_path = if (p.sub_path.len == 0) sub_path else p: {
+ break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
+ p.sub_path, sub_path,
+ }) catch return error.NameTooLong;
+ };
+ return p.root_dir.handle.statFile(joined_path);
+ }
+
+ pub fn atomicFile(
+ p: Path,
+ sub_path: []const u8,
+ options: fs.Dir.AtomicFileOptions,
+ ) !fs.AtomicFile {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const joined_path = if (p.sub_path.len == 0) sub_path else p: {
+ break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
+ p.sub_path, sub_path,
+ }) catch return error.NameTooLong;
+ };
+ return p.root_dir.handle.atomicFile(joined_path, options);
}
-}
-pub fn createFilePkg(
- gpa: Allocator,
- cache_directory: Compilation.Directory,
- basename: []const u8,
- contents: []const u8,
-) !*Package {
- const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
- {
- var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
- defer tmp_dir.close();
- try tmp_dir.writeFile(basename, contents);
+ pub fn access(p: Path, sub_path: []const u8, flags: fs.File.OpenFlags) !void {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const joined_path = if (p.sub_path.len == 0) sub_path else p: {
+ break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
+ p.sub_path, sub_path,
+ }) catch return error.NameTooLong;
+ };
+ return p.root_dir.handle.access(joined_path, flags);
}
- var hh: Cache.HashHelper = .{};
- hh.addBytes(build_options.version);
- hh.addBytes(contents);
- const hex_digest = hh.final();
-
- const o_dir_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
- try renameTmpIntoCache(cache_directory.handle, tmp_dir_sub_path, o_dir_sub_path);
-
- return createWithDir(gpa, cache_directory, o_dir_sub_path, basename);
-}
-
-pub const Report = struct {
- ast: ?*const std.zig.Ast,
- directory: Compilation.Directory,
- error_bundle: *std.zig.ErrorBundle.Wip,
-
- fn fail(
- report: Report,
- tok: std.zig.Ast.TokenIndex,
+ pub fn format(
+ self: Path,
comptime fmt_string: []const u8,
- fmt_args: anytype,
- ) error{ PackageFetchFailed, OutOfMemory } {
- const msg = try report.error_bundle.printString(fmt_string, fmt_args);
- return failMsg(report, tok, msg);
- }
-
- fn failMsg(
- report: Report,
- tok: std.zig.Ast.TokenIndex,
- msg: u32,
- ) error{ PackageFetchFailed, OutOfMemory } {
- const gpa = report.error_bundle.gpa;
-
- const file_path = try report.directory.join(gpa, &.{Manifest.basename});
- defer gpa.free(file_path);
-
- const eb = report.error_bundle;
-
- if (report.ast) |ast| {
- try addErrorMessage(ast, file_path, eb, 0, msg, tok, 0);
- } else {
- try eb.addRootErrorMessage(.{
- .msg = msg,
- .src_loc = .none,
- .notes_len = 0,
- });
- }
-
- return error.PackageFetchFailed;
- }
-
- fn addErrorWithNotes(
- report: Report,
- notes_len: u32,
- msg: Manifest.ErrorMessage,
- ) error{OutOfMemory}!void {
- const eb = report.error_bundle;
- const msg_str = try eb.addString(msg.msg);
- if (report.ast) |ast| {
- const gpa = eb.gpa;
- const file_path = try report.directory.join(gpa, &.{Manifest.basename});
- defer gpa.free(file_path);
- return addErrorMessage(ast, file_path, eb, notes_len, msg_str, msg.tok, msg.off);
- } else {
- return eb.addRootErrorMessage(.{
- .msg = msg_str,
- .src_loc = .none,
- .notes_len = notes_len,
- });
- }
- }
-
- fn addErrorMessage(
- ast: *const std.zig.Ast,
- file_path: []const u8,
- eb: *std.zig.ErrorBundle.Wip,
- notes_len: u32,
- msg_str: u32,
- msg_tok: std.zig.Ast.TokenIndex,
- msg_off: u32,
- ) error{OutOfMemory}!void {
- const token_starts = ast.tokens.items(.start);
- const start_loc = ast.tokenLocation(0, msg_tok);
-
- try eb.addRootErrorMessage(.{
- .msg = msg_str,
- .src_loc = try eb.addSourceLocation(.{
- .src_path = try eb.addString(file_path),
- .span_start = token_starts[msg_tok],
- .span_end = @as(u32, @intCast(token_starts[msg_tok] + ast.tokenSlice(msg_tok).len)),
- .span_main = token_starts[msg_tok] + msg_off,
- .line = @intCast(start_loc.line),
- .column = @as(u32, @intCast(start_loc.column)),
- .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
- }),
- .notes_len = notes_len,
- });
- }
-};
-
-pub const FetchLocation = union(enum) {
- /// The relative path to a file or directory.
- /// This may be a file that requires unpacking (such as a .tar.gz),
- /// or the path to the root directory of a package.
- file: []const u8,
- directory: []const u8,
- http_request: std.Uri,
- git_request: std.Uri,
-
- pub fn init(
- gpa: Allocator,
- dep: Manifest.Dependency,
- root_dir: Compilation.Directory,
- report: Report,
- ) !FetchLocation {
- switch (dep.location) {
- .url => |url| {
- const uri = std.Uri.parse(url) catch |err| switch (err) {
- error.UnexpectedCharacter => return report.fail(dep.location_tok, "failed to parse dependency location as URI", .{}),
- else => return err,
- };
- return initUri(uri, dep.location_tok, report);
- },
- .path => |path| {
- if (fs.path.isAbsolute(path)) {
- return report.fail(dep.location_tok, "absolute paths are not allowed. Use a relative path instead", .{});
- }
-
- const is_dir = isDirectory(root_dir, path) catch |err| switch (err) {
- error.FileNotFound => return report.fail(dep.location_tok, "file not found: {s}", .{path}),
- else => return err,
- };
-
- return if (is_dir)
- .{ .directory = try gpa.dupe(u8, path) }
- else
- .{ .file = try gpa.dupe(u8, path) };
- },
- }
- }
-
- pub fn initUri(uri: std.Uri, location_tok: std.zig.Ast.TokenIndex, report: Report) !FetchLocation {
- if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
- return report.fail(location_tok, "'file' scheme is not allowed for URLs. Use '.path' instead", .{});
- } else if (ascii.eqlIgnoreCase(uri.scheme, "http") or ascii.eqlIgnoreCase(uri.scheme, "https")) {
- return .{ .http_request = uri };
- } else if (ascii.eqlIgnoreCase(uri.scheme, "git+http") or ascii.eqlIgnoreCase(uri.scheme, "git+https")) {
- return .{ .git_request = uri };
- } else {
- return report.fail(location_tok, "unsupported URL scheme: {s}", .{uri.scheme});
- }
- }
-
- pub fn deinit(f: *FetchLocation, gpa: Allocator) void {
- switch (f.*) {
- .file, .directory => |path| gpa.free(path),
- .http_request, .git_request => {},
- }
- f.* = undefined;
- }
-
- pub fn fetch(
- f: FetchLocation,
- gpa: Allocator,
- root_dir: Compilation.Directory,
- http_client: *std.http.Client,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
- ) !ReadableResource {
- switch (f) {
- .file => |file| {
- const owned_path = try gpa.dupe(u8, file);
- errdefer gpa.free(owned_path);
- return .{
- .path = owned_path,
- .resource = .{ .file = try root_dir.handle.openFile(file, .{}) },
- };
- },
- .http_request => |uri| {
- var h = std.http.Headers{ .allocator = gpa };
- defer h.deinit();
-
- var req = try http_client.request(.GET, uri, h, .{});
- errdefer req.deinit();
-
- try req.start(.{});
- try req.wait();
-
- if (req.response.status != .ok) {
- return report.fail(dep_location_tok, "expected response status '200 OK' got '{} {s}'", .{
- @intFromEnum(req.response.status),
- req.response.status.phrase() orelse "",
- });
- }
-
- return .{
- .path = try gpa.dupe(u8, uri.path),
- .resource = .{ .http_request = req },
- };
- },
- .git_request => |uri| {
- var transport_uri = uri;
- transport_uri.scheme = uri.scheme["git+".len..];
- var redirect_uri: []u8 = undefined;
- var session: git.Session = .{ .transport = http_client, .uri = transport_uri };
- session.discoverCapabilities(gpa, &redirect_uri) catch |e| switch (e) {
- error.Redirected => {
- defer gpa.free(redirect_uri);
- return report.fail(dep_location_tok, "repository moved to {s}", .{redirect_uri});
- },
- else => |other| return other,
- };
-
- const want_oid = want_oid: {
- const want_ref = uri.fragment orelse "HEAD";
- if (git.parseOid(want_ref)) |oid| break :want_oid oid else |_| {}
-
- const want_ref_head = try std.fmt.allocPrint(gpa, "refs/heads/{s}", .{want_ref});
- defer gpa.free(want_ref_head);
- const want_ref_tag = try std.fmt.allocPrint(gpa, "refs/tags/{s}", .{want_ref});
- defer gpa.free(want_ref_tag);
-
- var ref_iterator = try session.listRefs(gpa, .{
- .ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
- .include_peeled = true,
- });
- defer ref_iterator.deinit();
- while (try ref_iterator.next()) |ref| {
- if (mem.eql(u8, ref.name, want_ref) or
- mem.eql(u8, ref.name, want_ref_head) or
- mem.eql(u8, ref.name, want_ref_tag))
- {
- break :want_oid ref.peeled orelse ref.oid;
- }
- }
- return report.fail(dep_location_tok, "ref not found: {s}", .{want_ref});
- };
- if (uri.fragment == null) {
- const notes_len = 1;
- try report.addErrorWithNotes(notes_len, .{
- .tok = dep_location_tok,
- .off = 0,
- .msg = "url field is missing an explicit ref",
- });
- const eb = report.error_bundle;
- const notes_start = try eb.reserveNotes(notes_len);
- eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
- .msg = try eb.printString("try .url = \"{+/}#{}\",", .{ uri, std.fmt.fmtSliceHexLower(&want_oid) }),
- }));
- return error.PackageFetchFailed;
- }
-
- var want_oid_buf: [git.fmt_oid_length]u8 = undefined;
- _ = std.fmt.bufPrint(&want_oid_buf, "{}", .{std.fmt.fmtSliceHexLower(&want_oid)}) catch unreachable;
- var fetch_stream = try session.fetch(gpa, &.{&want_oid_buf});
- errdefer fetch_stream.deinit();
-
- return .{
- .path = try gpa.dupe(u8, &want_oid_buf),
- .resource = .{ .git_fetch_stream = fetch_stream },
- };
- },
- .directory => unreachable, // Directories do not require fetching
- }
- }
-};
-
-pub const ReadableResource = struct {
- path: []const u8,
- resource: union(enum) {
- file: fs.File,
- http_request: std.http.Client.Request,
- git_fetch_stream: git.Session.FetchStream,
- dir: fs.IterableDir,
- },
-
- /// Unpack the package into the global cache directory.
-    /// If `rr` does not require unpacking (for example, if it is a directory), then no caching is performed.
- /// In either case, the hash is computed and returned along with the path to the package.
- pub fn unpack(
- rr: *ReadableResource,
- allocator: Allocator,
- thread_pool: *ThreadPool,
- global_cache_directory: Compilation.Directory,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
- pkg_prog_node: *std.Progress.Node,
- ) !PackageLocation {
- switch (rr.resource) {
- inline .file, .http_request, .git_fetch_stream, .dir => |*r, tag| {
- const s = fs.path.sep_str;
- const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
-
- const actual_hash = h: {
- var tmp_directory: Compilation.Directory = d: {
- const path = try global_cache_directory.join(allocator, &.{tmp_dir_sub_path});
- errdefer allocator.free(path);
-
- const iterable_dir = try global_cache_directory.handle.makeOpenPathIterable(tmp_dir_sub_path, .{});
- errdefer iterable_dir.close();
-
- break :d .{
- .path = path,
- .handle = iterable_dir.dir,
- };
- };
- defer tmp_directory.closeAndFree(allocator);
-
- if (tag != .dir) {
- const opt_content_length = try rr.getSize();
-
- var prog_reader: ProgressReader(@TypeOf(r.reader())) = .{
- .child_reader = r.reader(),
- .prog_node = pkg_prog_node,
- .unit = if (opt_content_length) |content_length| unit: {
- const kib = content_length / 1024;
- const mib = kib / 1024;
- if (mib > 0) {
- pkg_prog_node.setEstimatedTotalItems(@intCast(mib));
- pkg_prog_node.setUnit("MiB");
- break :unit .mib;
- } else {
- pkg_prog_node.setEstimatedTotalItems(@intCast(@max(1, kib)));
- pkg_prog_node.setUnit("KiB");
- break :unit .kib;
- }
- } else .any,
- };
-
- switch (try rr.getFileType(dep_location_tok, report)) {
- .tar => try unpackTarball(allocator, prog_reader.reader(), tmp_directory.handle, dep_location_tok, report),
- .@"tar.gz" => try unpackTarballCompressed(allocator, prog_reader, tmp_directory.handle, dep_location_tok, report, std.compress.gzip),
- .@"tar.xz" => try unpackTarballCompressed(allocator, prog_reader, tmp_directory.handle, dep_location_tok, report, std.compress.xz),
- .git_pack => try unpackGitPack(allocator, &prog_reader, git.parseOid(rr.path) catch unreachable, tmp_directory.handle, dep_location_tok, report),
- }
- } else {
- // Recursive directory copy.
- var it = try r.walk(allocator);
- defer it.deinit();
- while (try it.next()) |entry| {
- switch (entry.kind) {
- .directory => try tmp_directory.handle.makePath(entry.path),
- .file => try r.dir.copyFile(
- entry.path,
- tmp_directory.handle,
- entry.path,
- .{},
- ),
- .sym_link => {
- var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
- const link_name = try r.dir.readLink(entry.path, &buf);
- // TODO: if this would create a symlink to outside
- // the destination directory, fail with an error instead.
- try tmp_directory.handle.symLink(link_name, entry.path, .{});
- },
- else => return error.IllegalFileTypeInPackage,
- }
- }
- }
-
- break :h try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
- };
-
- const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
- const unpacked_path = try global_cache_directory.join(allocator, &.{pkg_dir_sub_path});
- defer allocator.free(unpacked_path);
-
- const relative_unpacked_path = try fs.path.relative(allocator, global_cache_directory.path.?, unpacked_path);
- errdefer allocator.free(relative_unpacked_path);
- try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, relative_unpacked_path);
-
- return .{
- .hash = actual_hash,
- .relative_unpacked_path = relative_unpacked_path,
- };
- },
- }
- }
-
- const FileType = enum {
- tar,
- @"tar.gz",
- @"tar.xz",
- git_pack,
- };
-
- pub fn getSize(rr: ReadableResource) !?u64 {
- switch (rr.resource) {
- .file => |f| return (try f.metadata()).size(),
- // TODO: Handle case of chunked content-length
- .http_request => |req| return req.response.content_length,
- .git_fetch_stream => |stream| return stream.request.response.content_length,
- .dir => unreachable,
- }
- }
-
- pub fn getFileType(
- rr: ReadableResource,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
- ) !FileType {
- switch (rr.resource) {
- .file => {
- return fileTypeFromPath(rr.path) orelse
- return report.fail(dep_location_tok, "unknown file type", .{});
- },
- .http_request => |req| {
- const content_type = req.response.headers.getFirstValue("Content-Type") orelse
- return report.fail(dep_location_tok, "missing 'Content-Type' header", .{});
-
- // If the response has a different content type than the URI indicates, override
- // the previously assumed file type.
- if (ascii.eqlIgnoreCase(content_type, "application/x-tar")) return .tar;
-
- return if (ascii.eqlIgnoreCase(content_type, "application/gzip") or
- ascii.eqlIgnoreCase(content_type, "application/x-gzip") or
- ascii.eqlIgnoreCase(content_type, "application/tar+gzip"))
- .@"tar.gz"
- else if (ascii.eqlIgnoreCase(content_type, "application/x-xz"))
- .@"tar.xz"
- else if (ascii.eqlIgnoreCase(content_type, "application/octet-stream")) ty: {
- // support gitlab tarball urls such as https://gitlab.com/<namespace>/<project>/-/archive/<sha>/<project>-<sha>.tar.gz
- // whose content-disposition header is: 'attachment; filename="<project>-<sha>.tar.gz"'
- const content_disposition = req.response.headers.getFirstValue("Content-Disposition") orelse
- return report.fail(dep_location_tok, "missing 'Content-Disposition' header for Content-Type=application/octet-stream", .{});
- break :ty getAttachmentType(content_disposition) orelse
- return report.fail(dep_location_tok, "unsupported 'Content-Disposition' header value: '{s}' for Content-Type=application/octet-stream", .{content_disposition});
- } else return report.fail(dep_location_tok, "unrecognized value for 'Content-Type' header: {s}", .{content_type});
- },
- .git_fetch_stream => return .git_pack,
- .dir => unreachable,
- }
- }
-
- fn fileTypeFromPath(file_path: []const u8) ?FileType {
- if (ascii.endsWithIgnoreCase(file_path, ".tar")) return .tar;
- if (ascii.endsWithIgnoreCase(file_path, ".tar.gz")) return .@"tar.gz";
- if (ascii.endsWithIgnoreCase(file_path, ".tar.xz")) return .@"tar.xz";
- return null;
- }
-
- fn getAttachmentType(content_disposition: []const u8) ?FileType {
- const disposition_type_end = ascii.indexOfIgnoreCase(content_disposition, "attachment;") orelse return null;
-
- var value_start = ascii.indexOfIgnoreCasePos(content_disposition, disposition_type_end + 1, "filename") orelse return null;
- value_start += "filename".len;
- if (content_disposition[value_start] == '*') {
- value_start += 1;
- }
- if (content_disposition[value_start] != '=') return null;
- value_start += 1;
-
- var value_end = mem.indexOfPos(u8, content_disposition, value_start, ";") orelse content_disposition.len;
- if (content_disposition[value_end - 1] == '\"') {
- value_end -= 1;
- }
- return fileTypeFromPath(content_disposition[value_start..value_end]);
- }
-
- pub fn deinit(rr: *ReadableResource, gpa: Allocator) void {
- gpa.free(rr.path);
- switch (rr.resource) {
- .file => |file| file.close(),
- .http_request => |*req| req.deinit(),
- .git_fetch_stream => |*stream| stream.deinit(),
- .dir => |*dir| dir.close(),
- }
- rr.* = undefined;
- }
-};
-
-pub const PackageLocation = struct {
- /// For packages that require unpacking, this is the hash of the package contents.
- /// For directories, this is the hash of the absolute file path.
- hash: [Manifest.Hash.digest_length]u8,
- relative_unpacked_path: []const u8,
-
- pub fn deinit(pl: *PackageLocation, allocator: Allocator) void {
- allocator.free(pl.relative_unpacked_path);
- pl.* = undefined;
- }
-};
-
-const hex_multihash_len = 2 * Manifest.multihash_len;
-const MultiHashHexDigest = [hex_multihash_len]u8;
-
-const DependencyModule = union(enum) {
- zig_pkg: *Package,
- non_zig_pkg: *Package,
-};
-/// This is to avoid creating multiple modules for the same build.zig file.
-/// If the value is `null`, the package is a known dependency, but has not yet
-/// been fetched.
-pub const AllModules = std.AutoHashMapUnmanaged(MultiHashHexDigest, ?DependencyModule);
-
-fn ProgressReader(comptime ReaderType: type) type {
- return struct {
- child_reader: ReaderType,
- bytes_read: u64 = 0,
- prog_node: *std.Progress.Node,
- unit: enum {
- kib,
- mib,
- any,
- },
-
- pub const Error = ReaderType.Error;
- pub const Reader = std.io.Reader(*@This(), Error, read);
-
- pub fn read(self: *@This(), buf: []u8) Error!usize {
- const amt = try self.child_reader.read(buf);
- self.bytes_read += amt;
- const kib = self.bytes_read / 1024;
- const mib = kib / 1024;
- switch (self.unit) {
- .kib => self.prog_node.setCompletedItems(@intCast(kib)),
- .mib => self.prog_node.setCompletedItems(@intCast(mib)),
- .any => {
- if (mib > 0) {
- self.prog_node.setUnit("MiB");
- self.prog_node.setCompletedItems(@intCast(mib));
- } else {
- self.prog_node.setUnit("KiB");
- self.prog_node.setCompletedItems(@intCast(kib));
- }
- },
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ if (fmt_string.len == 1) {
+ // Quote-escape the string.
+ const stringEscape = std.zig.fmt.stringEscape;
+ const f = switch (fmt_string[0]) {
+ 'q' => "",
+ '\'' => '\'',
+ else => @compileError("unsupported format string: " ++ fmt_string),
+ };
+ if (self.root_dir.path) |p| {
+ try stringEscape(p, f, options, writer);
+ if (self.sub_path.len > 0) try writer.writeAll(fs.path.sep_str);
}
- self.prog_node.activate();
- return amt;
- }
-
- pub fn reader(self: *@This()) Reader {
- return .{ .context = self };
- }
- };
-}
-
-/// Get a cached package if it exists.
-/// Returns `null` if the package has not been cached
-/// If the package exists in the cache, returns a pointer to the package and a
-/// boolean indicating whether this package has already been seen in the build
-/// (i.e. whether or not its transitive dependencies have been fetched).
-fn getCachedPackage(
- gpa: Allocator,
- global_cache_directory: Compilation.Directory,
- dep: Manifest.Dependency,
- all_modules: *AllModules,
- root_prog_node: *std.Progress.Node,
-) !?struct { DependencyModule, bool } {
- const s = fs.path.sep_str;
- // Check if the expected_hash is already present in the global package
- // cache, and thereby avoid both fetching and unpacking.
- if (dep.hash) |h| {
- const hex_digest = h[0..hex_multihash_len];
- const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
-
- var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
- error.FileNotFound => return null,
- else => |e| return e,
- };
- errdefer pkg_dir.close();
-
- // The compiler has a rule that a file must not be included in multiple modules,
- // so we must detect if a module has been created for this package and reuse it.
- const gop = try all_modules.getOrPut(gpa, hex_digest.*);
- if (gop.found_existing) {
- if (gop.value_ptr.*) |mod| {
- return .{ mod, true };
- }
- }
-
- root_prog_node.completeOne();
-
- const is_zig_mod = if (pkg_dir.access(build_zig_basename, .{})) |_| true else |_| false;
- const basename = if (is_zig_mod) build_zig_basename else "";
- const pkg = try createWithDir(gpa, global_cache_directory, pkg_dir_sub_path, basename);
-
- const module: DependencyModule = if (is_zig_mod)
- .{ .zig_pkg = pkg }
- else
- .{ .non_zig_pkg = pkg };
-
- try all_modules.put(gpa, hex_digest.*, module);
- return .{ module, false };
- }
-
- return null;
-}
-
-fn getDirectoryModule(
- gpa: Allocator,
- fetch_location: FetchLocation,
- directory: Compilation.Directory,
- all_modules: *AllModules,
- dep: *Manifest.Dependency,
- report: Report,
-) !struct { DependencyModule, bool } {
- assert(fetch_location == .directory);
-
- if (dep.hash != null) {
- return report.fail(dep.hash_tok, "hash not allowed for directory package", .{});
- }
-
- const hash = try computePathHash(gpa, directory, fetch_location.directory);
- const hex_digest = Manifest.hexDigest(hash);
- dep.hash = try gpa.dupe(u8, &hex_digest);
-
- // There is no fixed location to check for directory modules.
- // Instead, check whether it is already listed in all_modules.
- if (all_modules.get(hex_digest)) |mod| return .{ mod.?, true };
-
- var pkg_dir = directory.handle.openDir(fetch_location.directory, .{}) catch |err| switch (err) {
- error.FileNotFound => return report.fail(dep.location_tok, "file not found: {s}", .{fetch_location.directory}),
- else => |e| return e,
- };
- defer pkg_dir.close();
-
- const is_zig_mod = if (pkg_dir.access(build_zig_basename, .{})) |_| true else |_| false;
- const basename = if (is_zig_mod) build_zig_basename else "";
-
- const pkg = try createWithDir(gpa, directory, fetch_location.directory, basename);
- const module: DependencyModule = if (is_zig_mod)
- .{ .zig_pkg = pkg }
- else
- .{ .non_zig_pkg = pkg };
-
- try all_modules.put(gpa, hex_digest, module);
- return .{ module, false };
-}
-
-fn fetchAndUnpack(
- fetch_location: FetchLocation,
- thread_pool: *ThreadPool,
- http_client: *std.http.Client,
- directory: Compilation.Directory,
- global_cache_directory: Compilation.Directory,
- dep: Manifest.Dependency,
- report: Report,
- all_modules: *AllModules,
- root_prog_node: *std.Progress.Node,
- /// This does not have to be any form of canonical or fully-qualified name: it
- /// is only intended to be human-readable for progress reporting.
- name_for_prog: []const u8,
-) !DependencyModule {
- assert(fetch_location != .directory);
-
- const gpa = http_client.allocator;
-
- var pkg_prog_node = root_prog_node.start(name_for_prog, 0);
- defer pkg_prog_node.end();
- pkg_prog_node.activate();
-
- var readable_resource = try fetch_location.fetch(gpa, directory, http_client, dep.location_tok, report);
- defer readable_resource.deinit(gpa);
-
- var package_location = try readable_resource.unpack(
- gpa,
- thread_pool,
- global_cache_directory,
- dep.location_tok,
- report,
- &pkg_prog_node,
- );
- defer package_location.deinit(gpa);
-
- const actual_hex = Manifest.hexDigest(package_location.hash);
- if (dep.hash) |h| {
- if (!mem.eql(u8, h, &actual_hex)) {
- return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
- h, actual_hex,
- });
- }
- } else {
- const notes_len = 1;
- try report.addErrorWithNotes(notes_len, .{
- .tok = dep.location_tok,
- .off = 0,
- .msg = "dependency is missing hash field",
- });
- const eb = report.error_bundle;
- const notes_start = try eb.reserveNotes(notes_len);
- eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
- .msg = try eb.printString("expected .hash = \"{s}\",", .{&actual_hex}),
- }));
- return error.PackageFetchFailed;
- }
-
- const build_zig_path = try fs.path.join(gpa, &.{ package_location.relative_unpacked_path, build_zig_basename });
- defer gpa.free(build_zig_path);
-
- const is_zig_mod = if (global_cache_directory.handle.access(build_zig_path, .{})) |_| true else |_| false;
- const basename = if (is_zig_mod) build_zig_basename else "";
- const pkg = try createWithDir(gpa, global_cache_directory, package_location.relative_unpacked_path, basename);
- const module: DependencyModule = if (is_zig_mod)
- .{ .zig_pkg = pkg }
- else
- .{ .non_zig_pkg = pkg };
-
- try all_modules.put(gpa, actual_hex, module);
- return module;
-}
-
-fn unpackTarballCompressed(
- gpa: Allocator,
- reader: anytype,
- out_dir: fs.Dir,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
- comptime Compression: type,
-) !void {
- var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
-
- var decompress = try Compression.decompress(gpa, br.reader());
- defer decompress.deinit();
-
- return unpackTarball(gpa, decompress.reader(), out_dir, dep_location_tok, report);
-}
-
-fn unpackTarball(
- gpa: Allocator,
- reader: anytype,
- out_dir: fs.Dir,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
-) !void {
- var diagnostics: std.tar.Options.Diagnostics = .{ .allocator = gpa };
- defer diagnostics.deinit();
-
- try std.tar.pipeToFileSystem(out_dir, reader, .{
- .diagnostics = &diagnostics,
- .strip_components = 1,
- // TODO: we would like to set this to executable_bit_only, but two
- // things need to happen before that:
- // 1. the tar implementation needs to support it
- // 2. the hashing algorithm here needs to support detecting the is_executable
- // bit on Windows from the ACLs (see the isExecutable function).
- .mode_mode = .ignore,
- });
-
- if (diagnostics.errors.items.len > 0) {
- const notes_len: u32 = @intCast(diagnostics.errors.items.len);
- try report.addErrorWithNotes(notes_len, .{
- .tok = dep_location_tok,
- .off = 0,
- .msg = "unable to unpack tarball",
- });
- const eb = report.error_bundle;
- const notes_start = try eb.reserveNotes(notes_len);
- for (diagnostics.errors.items, notes_start..) |item, note_i| {
- switch (item) {
- .unable_to_create_sym_link => |info| {
- eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
- .msg = try eb.printString("unable to create symlink from '{s}' to '{s}': {s}", .{
- info.file_name, info.link_name, @errorName(info.code),
- }),
- }));
- },
- .unsupported_file_type => |info| {
- eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
- .msg = try eb.printString("file '{s}' has unsupported type '{c}'", .{
- info.file_name, @intFromEnum(info.file_type),
- }),
- }));
- },
+ if (self.sub_path.len > 0) {
+ try stringEscape(self.sub_path, f, options, writer);
}
+ return;
}
- return error.InvalidTarball;
- }
-}
-
-fn unpackGitPack(
- gpa: Allocator,
- reader: anytype,
- want_oid: git.Oid,
- out_dir: fs.Dir,
- dep_location_tok: std.zig.Ast.TokenIndex,
- report: Report,
-) !void {
- // The .git directory is used to store the packfile and associated index, but
- // we do not attempt to replicate the exact structure of a real .git
- // directory, since that isn't relevant for fetching a package.
- {
- var pack_dir = try out_dir.makeOpenPath(".git", .{});
- defer pack_dir.close();
- var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
- defer pack_file.close();
- var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
- try fifo.pump(reader.reader(), pack_file.writer());
- try pack_file.sync();
-
- var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
- defer index_file.close();
- {
- var index_prog_node = reader.prog_node.start("Index pack", 0);
- defer index_prog_node.end();
- index_prog_node.activate();
- var index_buffered_writer = std.io.bufferedWriter(index_file.writer());
- try git.indexPack(gpa, pack_file, index_buffered_writer.writer());
- try index_buffered_writer.flush();
- try index_file.sync();
+ if (fmt_string.len > 0)
+ std.fmt.invalidFmtError(fmt_string, self);
+ if (self.root_dir.path) |p| {
+ try writer.writeAll(p);
+ try writer.writeAll(fs.path.sep_str);
}
-
- {
- var checkout_prog_node = reader.prog_node.start("Checkout", 0);
- defer checkout_prog_node.end();
- checkout_prog_node.activate();
- var repository = try git.Repository.init(gpa, pack_file, index_file);
- defer repository.deinit();
- var diagnostics: git.Diagnostics = .{ .allocator = gpa };
- defer diagnostics.deinit();
- try repository.checkout(out_dir, want_oid, &diagnostics);
-
- if (diagnostics.errors.items.len > 0) {
- const notes_len: u32 = @intCast(diagnostics.errors.items.len);
- try report.addErrorWithNotes(notes_len, .{
- .tok = dep_location_tok,
- .off = 0,
- .msg = "unable to unpack packfile",
- });
- const eb = report.error_bundle;
- const notes_start = try eb.reserveNotes(notes_len);
- for (diagnostics.errors.items, notes_start..) |item, note_i| {
- switch (item) {
- .unable_to_create_sym_link => |info| {
- eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
- .msg = try eb.printString("unable to create symlink from '{s}' to '{s}': {s}", .{
- info.file_name, info.link_name, @errorName(info.code),
- }),
- }));
- },
- }
- }
- return error.InvalidGitPack;
- }
+ if (self.sub_path.len > 0) {
+ try writer.writeAll(self.sub_path);
+ try writer.writeAll(fs.path.sep_str);
}
}
+};
- try out_dir.deleteTree(".git");
-}
-
-/// Compute the hash of a file path.
-fn computePathHash(gpa: Allocator, dir: Compilation.Directory, path: []const u8) ![Manifest.Hash.digest_length]u8 {
- const resolved_path = try std.fs.path.resolve(gpa, &.{ dir.path.?, path });
- defer gpa.free(resolved_path);
- var hasher = Manifest.Hash.init(.{});
- hasher.update(resolved_path);
- return hasher.finalResult();
-}
-
-fn isDirectory(root_dir: Compilation.Directory, path: []const u8) !bool {
- var dir = root_dir.handle.openDir(path, .{}) catch |err| switch (err) {
- error.NotDir => return false,
- else => return err,
- };
- defer dir.close();
- return true;
-}
-
-fn renameTmpIntoCache(
- cache_dir: fs.Dir,
- tmp_dir_sub_path: []const u8,
- dest_dir_sub_path: []const u8,
-) !void {
- assert(dest_dir_sub_path[1] == fs.path.sep);
- var handled_missing_dir = false;
- while (true) {
- cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
- error.FileNotFound => {
- if (handled_missing_dir) return err;
- cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
- error.PathAlreadyExists => handled_missing_dir = true,
- else => |e| return e,
- };
- continue;
- },
- error.PathAlreadyExists, error.AccessDenied => {
-            // Package has already been downloaded and may already be in use on the system.
- cache_dir.deleteTree(tmp_dir_sub_path) catch |del_err| {
- std.log.warn("unable to delete temp directory: {s}", .{@errorName(del_err)});
- };
- },
- else => |e| return e,
- };
- break;
- }
-}
-
-test "getAttachmentType" {
- try std.testing.expectEqual(@as(?ReadableResource.FileType, .@"tar.gz"), ReadableResource.getAttachmentType("attaChment; FILENAME=\"stuff.tar.gz\"; size=42"));
- try std.testing.expectEqual(@as(?ReadableResource.FileType, .@"tar.gz"), ReadableResource.getAttachmentType("attachment; filename*=\"stuff.tar.gz\""));
- try std.testing.expectEqual(@as(?ReadableResource.FileType, .@"tar.xz"), ReadableResource.getAttachmentType("ATTACHMENT; filename=\"stuff.tar.xz\""));
- try std.testing.expectEqual(@as(?ReadableResource.FileType, .@"tar.xz"), ReadableResource.getAttachmentType("attachment; FileName=\"stuff.tar.xz\""));
- try std.testing.expectEqual(@as(?ReadableResource.FileType, .@"tar.gz"), ReadableResource.getAttachmentType("attachment; FileName*=UTF-8\'\'xyz%2Fstuff.tar.gz"));
-
- try std.testing.expect(ReadableResource.getAttachmentType("attachment FileName=\"stuff.tar.gz\"") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("attachment; FileName=\"stuff.tar\"") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("attachment; FileName\"stuff.gz\"") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("attachment; size=42") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("inline; size=42") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("FileName=\"stuff.tar.gz\"; attachment;") == null);
- try std.testing.expect(ReadableResource.getAttachmentType("FileName=\"stuff.tar.gz\";") == null);
-}
+const Package = @This();
+const builtin = @import("builtin");
+const std = @import("std");
+const fs = std.fs;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const Cache = std.Build.Cache;
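A minimal usage sketch of the new `Path` helpers above; it is not part of this commit, and `gpa` and `pkg_root` are assumed caller-provided values:

const std = @import("std");
// Assumes this sketch lives next to Package.zig, where `Path` is declared.
const Path = @import("Package.zig").Path;

fn pathDemo(gpa: std.mem.Allocator, pkg_root: Path) !void {
    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();

    // Resolve a manifest-relative dependency path without touching the filesystem.
    const dep_root = try pkg_root.resolvePosix(arena_state.allocator(), "libs/foo");

    // "{q}" is handled by `Path.format` above: it quote-escapes the path so it
    // can be embedded in generated Zig source such as dependencies.zig.
    std.debug.print("build_root = \"{q}\"\n", .{dep_root});

    // Open a file relative to the resolved package root.
    const manifest_file = try dep_root.openFile("build.zig.zon", .{});
    defer manifest_file.close();
}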
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
new file mode 100644
index 0000000000..3aae5a533f
--- /dev/null
+++ b/src/Package/Fetch.zig
@@ -0,0 +1,1557 @@
+//! Represents one independent job whose responsibility is to:
+//!
+//! 1. Check the global zig package cache to see if the hash already exists.
+//! If so, load, parse, and validate the build.zig.zon file therein, and
+//! goto step 8. Likewise if the location is a relative path, treat this
+//! the same as a cache hit. Otherwise, proceed.
+//! 2. Fetch and unpack a URL into a temporary directory.
+//! 3. Load, parse, and validate the build.zig.zon file therein. It is allowed
+//! for the file to be missing, in which case this fetched package is considered
+//! to be a "naked" package.
+//! 4. Apply inclusion rules of the build.zig.zon to the temporary directory by
+//!    deleting excluded files. If any errors occurred for files that were
+//! ultimately excluded, those errors should be ignored, such as failure to
+//! create symlinks that weren't supposed to be included anyway.
+//! 5. Compute the package hash based on the remaining files in the temporary
+//! directory.
+//! 6. Rename the temporary directory into the global zig package cache
+//! directory. If the hash already exists, delete the temporary directory and
+//! leave the zig package cache directory untouched as it may be in use by the
+//! system. This is done even if the hash is invalid, in case the package with
+//! the different hash is used in the future.
+//! 7. Validate the computed hash against the expected hash. If invalid,
+//! this job is done.
+//! 8. Spawn a new fetch job for each dependency in the manifest file. Use
+//! a mutex and a hash map so that redundant jobs do not get queued up.
+//!
+//! All of this must be done with only referring to the state inside this struct
+//! because this work will be done in a dedicated thread.
+
+arena: std.heap.ArenaAllocator,
+location: Location,
+location_tok: std.zig.Ast.TokenIndex,
+hash_tok: std.zig.Ast.TokenIndex,
+parent_package_root: Package.Path,
+parent_manifest_ast: ?*const std.zig.Ast,
+prog_node: *std.Progress.Node,
+job_queue: *JobQueue,
+/// If true, don't add an error for a missing hash. This flag is not passed
+/// down to recursive dependencies. It's intended to be used only by the CLI.
+omit_missing_hash_error: bool,
+/// If true, don't fail when a manifest file is missing the `paths` field,
+/// which specifies inclusion rules. This is intended to be true for the first
+/// fetch task and false for the recursive dependencies.
+allow_missing_paths_field: bool,
+
+// Above this are fields provided as inputs to `run`.
+// Below this are fields populated by `run`.
+
+/// This will either be relative to `global_cache`, or to the build root of
+/// the root package.
+package_root: Package.Path,
+error_bundle: ErrorBundle.Wip,
+manifest: ?Manifest,
+manifest_ast: std.zig.Ast,
+actual_hash: Manifest.Digest,
+/// Fetch logic notices whether a package has a build.zig file and sets this flag.
+has_build_zig: bool,
+/// Indicates whether the task aborted due to an out-of-memory condition.
+oom_flag: bool,
+
+// This field is used by the CLI only, untouched by this file.
+
+/// The module for this `Fetch` task's package, which exposes `build.zig` as
+/// the root source file.
+module: ?*Package.Module,
+
+/// Contains shared state among all `Fetch` tasks.
+pub const JobQueue = struct {
+ mutex: std.Thread.Mutex = .{},
+ /// It's an array hash map so that it can be sorted before rendering the
+ /// dependencies.zig source file.
+ /// Protected by `mutex`.
+ table: Table = .{},
+ /// `table` may be missing some tasks such as ones that failed, so this
+ /// field contains references to all of them.
+ /// Protected by `mutex`.
+ all_fetches: std.ArrayListUnmanaged(*Fetch) = .{},
+
+ http_client: *std.http.Client,
+ thread_pool: *ThreadPool,
+ wait_group: WaitGroup = .{},
+ global_cache: Cache.Directory,
+ recursive: bool,
+ work_around_btrfs_bug: bool,
+
+ pub const Table = std.AutoArrayHashMapUnmanaged(Manifest.MultiHashHexDigest, *Fetch);
+
+ pub fn deinit(jq: *JobQueue) void {
+ if (jq.all_fetches.items.len == 0) return;
+ const gpa = jq.all_fetches.items[0].arena.child_allocator;
+ jq.table.deinit(gpa);
+ // These must be deinitialized in reverse order because subsequent
+ // `Fetch` instances are allocated in prior ones' arenas.
+ // Sorry, I know it's a bit weird, but it slightly simplifies the
+ // critical section.
+ while (jq.all_fetches.popOrNull()) |f| f.deinit();
+ jq.all_fetches.deinit(gpa);
+ jq.* = undefined;
+ }
+
+ /// Dumps all subsequent error bundles into the first one.
+ pub fn consolidateErrors(jq: *JobQueue) !void {
+ const root = &jq.all_fetches.items[0].error_bundle;
+ const gpa = root.gpa;
+ for (jq.all_fetches.items[1..]) |fetch| {
+ if (fetch.error_bundle.root_list.items.len > 0) {
+ var bundle = try fetch.error_bundle.toOwnedBundle("");
+ defer bundle.deinit(gpa);
+ try root.addBundleAsRoots(bundle);
+ }
+ }
+ }
+
+ /// Creates the dependencies.zig source code for the build runner to obtain
+ /// via `@import("@dependencies")`.
+ pub fn createDependenciesSource(jq: *JobQueue, buf: *std.ArrayList(u8)) Allocator.Error!void {
+ const keys = jq.table.keys();
+
+ assert(keys.len != 0); // caller should have added the first one
+ if (keys.len == 1) {
+ // This is the first one. It must have no dependencies.
+ return createEmptyDependenciesSource(buf);
+ }
+
+ try buf.appendSlice("pub const packages = struct {\n");
+
+ // Ensure the generated .zig file is deterministic.
+ jq.table.sortUnstable(@as(struct {
+ keys: []const Manifest.MultiHashHexDigest,
+ pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
+ return std.mem.lessThan(u8, &ctx.keys[a_index], &ctx.keys[b_index]);
+ }
+ }, .{ .keys = keys }));
+
+ for (keys, jq.table.values()) |hash, fetch| {
+ if (fetch == jq.all_fetches.items[0]) {
+ // The first one is a dummy package for the current project.
+ continue;
+ }
+ try buf.writer().print(
+ \\ pub const {} = struct {{
+ \\ pub const build_root = "{q}";
+ \\
+ , .{ std.zig.fmtId(&hash), fetch.package_root });
+
+ if (fetch.has_build_zig) {
+ try buf.writer().print(
+ \\ pub const build_zig = @import("{}");
+ \\
+ , .{std.zig.fmtEscapes(&hash)});
+ }
+
+ if (fetch.manifest) |*manifest| {
+ try buf.appendSlice(
+ \\ pub const deps: []const struct { []const u8, []const u8 } = &.{
+ \\
+ );
+ for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
+ const h = depDigest(fetch.package_root, jq.global_cache, dep) orelse continue;
+ try buf.writer().print(
+ " .{{ \"{}\", \"{}\" }},\n",
+ .{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(&h) },
+ );
+ }
+
+ try buf.appendSlice(
+ \\ };
+ \\ };
+ \\
+ );
+ } else {
+ try buf.appendSlice(
+ \\ pub const deps: []const struct { []const u8, []const u8 } = &.{};
+ \\ };
+ \\
+ );
+ }
+ }
+
+ try buf.appendSlice(
+ \\};
+ \\
+ \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{
+ \\
+ );
+
+ const root_fetch = jq.all_fetches.items[0];
+ const root_manifest = &root_fetch.manifest.?;
+
+ for (root_manifest.dependencies.keys(), root_manifest.dependencies.values()) |name, dep| {
+ const h = depDigest(root_fetch.package_root, jq.global_cache, dep) orelse continue;
+ try buf.writer().print(
+ " .{{ \"{}\", \"{}\" }},\n",
+ .{ std.zig.fmtEscapes(name), std.zig.fmtEscapes(&h) },
+ );
+ }
+ try buf.appendSlice("};\n");
+ }
+
+ pub fn createEmptyDependenciesSource(buf: *std.ArrayList(u8)) Allocator.Error!void {
+ try buf.appendSlice(
+ \\pub const packages = struct {};
+ \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{};
+ \\
+ );
+ }
+};
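To make the generated source concrete, the following hand-written sketch shows the dependencies.zig shape that `createDependenciesSource` emits; every hash and path is invented and abbreviated:

pub const packages = struct {
    pub const @"1220aaaa..." = struct {
        pub const build_root = "/home/user/.cache/zig/p/1220aaaa...";
        pub const build_zig = @import("1220aaaa..."); // emitted only when has_build_zig
        pub const deps: []const struct { []const u8, []const u8 } = &.{
            .{ "bar", "1220bbbb..." },
        };
    };
    // A "naked" package: no build.zig, so no build_zig declaration.
    pub const @"1220bbbb..." = struct {
        pub const build_root = "/home/user/.cache/zig/p/1220bbbb...";
        pub const deps: []const struct { []const u8, []const u8 } = &.{};
    };
};

pub const root_deps: []const struct { []const u8, []const u8 } = &.{
    .{ "foo", "1220aaaa..." },
};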
+
+pub const Location = union(enum) {
+ remote: Remote,
+ /// A directory found inside the parent package.
+ relative_path: Package.Path,
+ /// Recursive Fetch tasks will never use this Location, but it may be
+ /// passed in by the CLI. Indicates the file contents here should be copied
+ /// into the global package cache. It may be a file relative to the cwd or
+ /// absolute, in which case it should be treated exactly like a `file://`
+ /// URL, or a directory, in which case it should be treated as an
+ /// already-unpacked directory (but still needs to be copied into the
+ /// global package cache and have inclusion rules applied).
+ path_or_url: []const u8,
+
+ pub const Remote = struct {
+ url: []const u8,
+        /// If this is null, it means the user omitted the hash field from a dependency.
+ /// It will be an error but the logic should still fetch and print the discovered hash.
+ hash: ?Manifest.MultiHashHexDigest,
+ };
+};
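Illustrative values for each variant; all of them are invented, and `build_root_dir` stands in for a `Cache.Directory` owned by the caller:

// A remote dependency as declared in build.zig.zon; a null hash is an error,
// but the fetch still runs so the discovered hash can be reported.
const by_url: Location = .{ .remote = .{
    .url = "https://example.com/foo-1.0.0.tar.gz",
    .hash = null,
} };

// A directory found inside the parent package.
const in_tree: Location = .{ .relative_path = .{
    .root_dir = build_root_dir,
    .sub_path = "libs/foo",
} };

// CLI-only: may name a local archive, an unpacked directory, or a URL.
const from_cli: Location = .{ .path_or_url = "foo-1.0.0.tar.gz" };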
+
+pub const RunError = error{
+ OutOfMemory,
+ /// This error code is intended to be handled by inspecting the
+ /// `error_bundle` field.
+ FetchFailed,
+};
+
+pub fn run(f: *Fetch) RunError!void {
+ const eb = &f.error_bundle;
+ const arena = f.arena.allocator();
+ const gpa = f.arena.child_allocator;
+ const cache_root = f.job_queue.global_cache;
+
+ try eb.init(gpa);
+
+ // Check the global zig package cache to see if the hash already exists. If
+ // so, load, parse, and validate the build.zig.zon file therein, and skip
+ // ahead to queuing up jobs for dependencies. Likewise if the location is a
+ // relative path, treat this the same as a cache hit. Otherwise, proceed.
+
+ const remote = switch (f.location) {
+ .relative_path => |pkg_root| {
+ if (fs.path.isAbsolute(pkg_root.sub_path)) return f.fail(
+ f.location_tok,
+ try eb.addString("expected path relative to build root; found absolute path"),
+ );
+ if (f.hash_tok != 0) return f.fail(
+ f.hash_tok,
+ try eb.addString("path-based dependencies are not hashed"),
+ );
+ if (std.mem.startsWith(u8, pkg_root.sub_path, "../") or
+ std.mem.eql(u8, pkg_root.sub_path, ".."))
+ {
+ return f.fail(
+ f.location_tok,
+ try eb.printString("dependency path outside project: '{}{s}'", .{
+ pkg_root.root_dir, pkg_root.sub_path,
+ }),
+ );
+ }
+ f.package_root = pkg_root;
+ try loadManifest(f, pkg_root);
+ if (!f.has_build_zig) try checkBuildFileExistence(f);
+ if (!f.job_queue.recursive) return;
+ return queueJobsForDeps(f);
+ },
+ .remote => |remote| remote,
+ .path_or_url => |path_or_url| {
+ if (fs.cwd().openIterableDir(path_or_url, .{})) |dir| {
+ var resource: Resource = .{ .dir = dir };
+ return runResource(f, path_or_url, &resource, null);
+ } else |dir_err| {
+ const file_err = if (dir_err == error.NotDir) e: {
+ if (fs.cwd().openFile(path_or_url, .{})) |file| {
+ var resource: Resource = .{ .file = file };
+ return runResource(f, path_or_url, &resource, null);
+ } else |err| break :e err;
+ } else dir_err;
+
+ const uri = std.Uri.parse(path_or_url) catch |uri_err| {
+ return f.fail(0, try eb.printString(
+                    "'{s}' could not be recognized as a file path ({s}) or a URL ({s})",
+ .{ path_or_url, @errorName(file_err), @errorName(uri_err) },
+ ));
+ };
+ var resource = try f.initResource(uri);
+ return runResource(f, uri.path, &resource, null);
+ }
+ },
+ };
+
+ const s = fs.path.sep_str;
+ if (remote.hash) |expected_hash| {
+ const pkg_sub_path = "p" ++ s ++ expected_hash;
+ if (cache_root.handle.access(pkg_sub_path, .{})) |_| {
+ f.package_root = .{
+ .root_dir = cache_root,
+ .sub_path = try arena.dupe(u8, pkg_sub_path),
+ };
+ try loadManifest(f, f.package_root);
+ try checkBuildFileExistence(f);
+ if (!f.job_queue.recursive) return;
+ return queueJobsForDeps(f);
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| {
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to open global package cache directory '{}{s}': {s}", .{
+ cache_root, pkg_sub_path, @errorName(e),
+ }),
+ });
+ return error.FetchFailed;
+ },
+ }
+ }
+
+ // Fetch and unpack the remote into a temporary directory.
+
+ const uri = std.Uri.parse(remote.url) catch |err| return f.fail(
+ f.location_tok,
+ try eb.printString("invalid URI: {s}", .{@errorName(err)}),
+ );
+ var resource = try f.initResource(uri);
+ return runResource(f, uri.path, &resource, remote.hash);
+}
+
+pub fn deinit(f: *Fetch) void {
+ f.error_bundle.deinit();
+ f.arena.deinit();
+}
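A hedged sketch of how a caller such as the CLI might drive one root task. The field list mirrors the input fields declared at the top of this file; `gpa`, `http_client`, `thread_pool`, `global_cache_directory`, and `root_prog_node` are assumed to be initialized elsewhere:

var job_queue: Fetch.JobQueue = .{
    .http_client = &http_client,
    .thread_pool = &thread_pool,
    .global_cache = global_cache_directory,
    .recursive = true,
    .work_around_btrfs_bug = false,
};
defer job_queue.deinit();

var fetch: Fetch = .{
    .arena = std.heap.ArenaAllocator.init(gpa),
    .location = .{ .path_or_url = "https://example.com/foo-1.0.0.tar.gz" },
    .location_tok = 0,
    .hash_tok = 0,
    .parent_package_root = undefined, // assumed unused for a root path_or_url task
    .parent_manifest_ast = null,
    .prog_node = root_prog_node,
    .job_queue = &job_queue,
    .omit_missing_hash_error = true,
    .allow_missing_paths_field = true,

    .package_root = undefined,
    .error_bundle = undefined,
    .manifest = null,
    .manifest_ast = undefined,
    .actual_hash = undefined,
    .has_build_zig = false,
    .oom_flag = false,

    .module = null,
};
try job_queue.all_fetches.append(gpa, &fetch); // deinitialized by job_queue.deinit

fetch.run() catch |err| switch (err) {
    error.OutOfMemory => @panic("OOM"),
    error.FetchFailed => {}, // details live in fetch.error_bundle
};
job_queue.wait_group.wait(); // recursive tasks were spawned on the thread pool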
+
+/// Consumes `resource`, even if an error is returned.
+fn runResource(
+ f: *Fetch,
+ uri_path: []const u8,
+ resource: *Resource,
+ remote_hash: ?Manifest.MultiHashHexDigest,
+) RunError!void {
+ defer resource.deinit();
+ const arena = f.arena.allocator();
+ const eb = &f.error_bundle;
+ const s = fs.path.sep_str;
+ const cache_root = f.job_queue.global_cache;
+ const rand_int = std.crypto.random.int(u64);
+ const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
+
+ const tmp_directory_path = try cache_root.join(arena, &.{tmp_dir_sub_path});
+ var tmp_directory: Cache.Directory = .{
+ .path = tmp_directory_path,
+ .handle = handle: {
+ const dir = cache_root.handle.makeOpenPathIterable(tmp_dir_sub_path, .{}) catch |err| {
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{
+ tmp_directory_path, @errorName(err),
+ }),
+ });
+ return error.FetchFailed;
+ };
+ break :handle dir.dir;
+ },
+ };
+ defer tmp_directory.handle.close();
+
+ try unpackResource(f, resource, uri_path, tmp_directory);
+
+ // Load, parse, and validate the unpacked build.zig.zon file. It is allowed
+ // for the file to be missing, in which case this fetched package is
+ // considered to be a "naked" package.
+ try loadManifest(f, .{ .root_dir = tmp_directory });
+
+ // Apply the manifest's inclusion rules to the temporary directory by
+ // deleting excluded files. If any error occurred for files that were
+ // ultimately excluded, those errors should be ignored, such as failure to
+ // create symlinks that weren't supposed to be included anyway.
+
+ // Empty directories have already been omitted by `unpackResource`.
+
+ const filter: Filter = .{
+ .include_paths = if (f.manifest) |m| m.paths else .{},
+ };
+
+ // Compute the package hash based on the remaining files in the temporary
+ // directory.
+
+ if (builtin.os.tag == .linux and f.job_queue.work_around_btrfs_bug) {
+ // https://github.com/ziglang/zig/issues/17095
+ tmp_directory.handle.close();
+ const iterable_dir = cache_root.handle.makeOpenPathIterable(tmp_dir_sub_path, .{}) catch
+ @panic("btrfs workaround failed");
+ tmp_directory.handle = iterable_dir.dir;
+ }
+
+ f.actual_hash = try computeHash(f, tmp_directory, filter);
+
+ // Rename the temporary directory into the global zig package cache
+ // directory. If the hash already exists, delete the temporary directory
+ // and leave the zig package cache directory untouched as it may be in use
+ // by the system. This is done even if the hash is invalid, in case the
+ // package with the different hash is used in the future.
+
+ f.package_root = .{
+ .root_dir = cache_root,
+ .sub_path = try arena.dupe(u8, "p" ++ s ++ Manifest.hexDigest(f.actual_hash)),
+ };
+ renameTmpIntoCache(cache_root.handle, tmp_dir_sub_path, f.package_root.sub_path) catch |err| {
+ const src = try cache_root.join(arena, &.{tmp_dir_sub_path});
+ const dest = try cache_root.join(arena, &.{f.package_root.sub_path});
+ try eb.addRootErrorMessage(.{ .msg = try eb.printString(
+ "unable to rename temporary directory '{s}' into package cache directory '{s}': {s}",
+ .{ src, dest, @errorName(err) },
+ ) });
+ return error.FetchFailed;
+ };
+
+ // Validate the computed hash against the expected hash. If invalid, this
+ // job is done.
+
+ const actual_hex = Manifest.hexDigest(f.actual_hash);
+ if (remote_hash) |declared_hash| {
+ if (!std.mem.eql(u8, &declared_hash, &actual_hex)) {
+ return f.fail(f.hash_tok, try eb.printString(
+ "hash mismatch: manifest declares {s} but the fetched package has {s}",
+ .{ declared_hash, actual_hex },
+ ));
+ }
+ } else if (!f.omit_missing_hash_error) {
+ const notes_len = 1;
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString("dependency is missing hash field"),
+ .src_loc = try f.srcLoc(f.location_tok),
+ .notes_len = notes_len,
+ });
+ const notes_start = try eb.reserveNotes(notes_len);
+ eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("expected .hash = \"{s}\",", .{&actual_hex}),
+ }));
+ return error.FetchFailed;
+ }
+
+ // Spawn a new fetch job for each dependency in the manifest file. Use
+ // a mutex and a hash map so that redundant jobs do not get queued up.
+ if (!f.job_queue.recursive) return;
+ return queueJobsForDeps(f);
+}
+
+/// `computeHash` gets a free check for the existence of `build.zig`, but when
+/// not computing a hash, we need to do a syscall to check for it.
+fn checkBuildFileExistence(f: *Fetch) RunError!void {
+ const eb = &f.error_bundle;
+ if (f.package_root.access(Package.build_zig_basename, .{})) |_| {
+ f.has_build_zig = true;
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| {
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to access '{}{s}': {s}", .{
+ f.package_root, Package.build_zig_basename, @errorName(e),
+ }),
+ });
+ return error.FetchFailed;
+ },
+ }
+}
+
+/// This function populates `f.manifest` or leaves it `null`.
+fn loadManifest(f: *Fetch, pkg_root: Package.Path) RunError!void {
+ const eb = &f.error_bundle;
+ const arena = f.arena.allocator();
+ const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
+ arena,
+ try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
+ Manifest.max_bytes,
+ null,
+ 1,
+ 0,
+ ) catch |err| switch (err) {
+ error.FileNotFound => return,
+ else => |e| {
+ const file_path = try pkg_root.join(arena, Manifest.basename);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to load package manifest '{}': {s}", .{
+ file_path, @errorName(e),
+ }),
+ });
+ return error.FetchFailed;
+ },
+ };
+
+ const ast = &f.manifest_ast;
+ ast.* = try std.zig.Ast.parse(arena, manifest_bytes, .zon);
+
+ if (ast.errors.len > 0) {
+ const file_path = try std.fmt.allocPrint(arena, "{}" ++ Manifest.basename, .{pkg_root});
+ try main.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
+ return error.FetchFailed;
+ }
+
+ f.manifest = try Manifest.parse(arena, ast.*, .{
+ .allow_missing_paths_field = f.allow_missing_paths_field,
+ });
+ const manifest = &f.manifest.?;
+
+ if (manifest.errors.len > 0) {
+ const src_path = try eb.printString("{}{s}", .{ pkg_root, Manifest.basename });
+ const token_starts = ast.tokens.items(.start);
+
+ for (manifest.errors) |msg| {
+ const start_loc = ast.tokenLocation(0, msg.tok);
+
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString(msg.msg),
+ .src_loc = try eb.addSourceLocation(.{
+ .src_path = src_path,
+ .span_start = token_starts[msg.tok],
+ .span_end = @intCast(token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+ .span_main = token_starts[msg.tok] + msg.off,
+ .line = @intCast(start_loc.line),
+ .column = @intCast(start_loc.column),
+ .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
+ }),
+ });
+ }
+ return error.FetchFailed;
+ }
+}
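For reference, a hand-written example of the manifest shape `loadManifest` parses; the names, URL, and hash are invented, and `paths` is the inclusion-rules field whose absence `allow_missing_paths_field` tolerates:

// build.zig.zon (illustrative)
.{
    .name = "foo",
    .version = "0.1.0",
    .dependencies = .{
        .bar = .{
            .url = "https://example.com/bar-2.0.0.tar.gz",
            .hash = "1220bbbb...", // multihash header plus hex SHA-256 digest
        },
        .baz = .{ .path = "libs/baz" }, // path-based dependencies carry no hash
    },
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "src",
    },
}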
+
+fn queueJobsForDeps(f: *Fetch) RunError!void {
+ assert(f.job_queue.recursive);
+
+ // If the package does not have a build.zig.zon file then there are no dependencies.
+ const manifest = f.manifest orelse return;
+
+ const new_fetches, const prog_names = nf: {
+ const parent_arena = f.arena.allocator();
+ const gpa = f.arena.child_allocator;
+ const cache_root = f.job_queue.global_cache;
+ const dep_names = manifest.dependencies.keys();
+ const deps = manifest.dependencies.values();
+ // Grab the new tasks into a temporary buffer so we can unlock that mutex
+ // as fast as possible.
+        // This over-allocates to account for any fetches that get skipped by
+        // the `continue` in the loop below.
+ const new_fetches = try parent_arena.alloc(Fetch, deps.len);
+ const prog_names = try parent_arena.alloc([]const u8, deps.len);
+ var new_fetch_index: usize = 0;
+
+ f.job_queue.mutex.lock();
+ defer f.job_queue.mutex.unlock();
+
+ try f.job_queue.all_fetches.ensureUnusedCapacity(gpa, new_fetches.len);
+ try f.job_queue.table.ensureUnusedCapacity(gpa, @intCast(new_fetches.len));
+
+ // There are four cases here:
+ // * Correct hash is provided by manifest.
+ // - Hash map already has the entry, no need to add it again.
+ // * Incorrect hash is provided by manifest.
+ // - Hash mismatch error emitted; `queueJobsForDeps` is not called.
+ // * Hash is not provided by manifest.
+ // - Hash missing error emitted; `queueJobsForDeps` is not called.
+        // * Path-based location is used without a hash.
+ // - Hash is added to the table based on the path alone before
+ // calling run(); no need to add it again.
+
+ for (dep_names, deps) |dep_name, dep| {
+ const new_fetch = &new_fetches[new_fetch_index];
+ const location: Location = switch (dep.location) {
+ .url => |url| .{ .remote = .{
+ .url = url,
+ .hash = h: {
+ const h = dep.hash orelse break :h null;
+ const digest_len = @typeInfo(Manifest.MultiHashHexDigest).Array.len;
+ const multihash_digest = h[0..digest_len].*;
+ const gop = f.job_queue.table.getOrPutAssumeCapacity(multihash_digest);
+ if (gop.found_existing) continue;
+ gop.value_ptr.* = new_fetch;
+ break :h multihash_digest;
+ },
+ } },
+ .path => |rel_path| l: {
+ // This might produce an invalid path, which is checked for
+ // at the beginning of run().
+ const new_root = try f.package_root.resolvePosix(parent_arena, rel_path);
+ const multihash_digest = relativePathDigest(new_root, cache_root);
+ const gop = f.job_queue.table.getOrPutAssumeCapacity(multihash_digest);
+ if (gop.found_existing) continue;
+ gop.value_ptr.* = new_fetch;
+ break :l .{ .relative_path = new_root };
+ },
+ };
+ prog_names[new_fetch_index] = dep_name;
+ new_fetch_index += 1;
+ f.job_queue.all_fetches.appendAssumeCapacity(new_fetch);
+ new_fetch.* = .{
+ .arena = std.heap.ArenaAllocator.init(gpa),
+ .location = location,
+ .location_tok = dep.location_tok,
+ .hash_tok = dep.hash_tok,
+ .parent_package_root = f.package_root,
+ .parent_manifest_ast = &f.manifest_ast,
+ .prog_node = f.prog_node,
+ .job_queue = f.job_queue,
+ .omit_missing_hash_error = false,
+ .allow_missing_paths_field = true,
+
+ .package_root = undefined,
+ .error_bundle = undefined,
+ .manifest = null,
+ .manifest_ast = undefined,
+ .actual_hash = undefined,
+ .has_build_zig = false,
+ .oom_flag = false,
+
+ .module = null,
+ };
+ }
+
+ // job_queue mutex is locked so this is OK.
+ f.prog_node.unprotected_estimated_total_items += new_fetch_index;
+
+ break :nf .{ new_fetches[0..new_fetch_index], prog_names[0..new_fetch_index] };
+ };
+
+ // Now it's time to give tasks to the thread pool.
+ const thread_pool = f.job_queue.thread_pool;
+
+ for (new_fetches, prog_names) |*new_fetch, prog_name| {
+ f.job_queue.wait_group.start();
+ thread_pool.spawn(workerRun, .{ new_fetch, prog_name }) catch |err| switch (err) {
+ error.OutOfMemory => {
+ new_fetch.oom_flag = true;
+ f.job_queue.wait_group.finish();
+ continue;
+ },
+ };
+ }
+}
+
+pub fn relativePathDigest(
+ pkg_root: Package.Path,
+ cache_root: Cache.Directory,
+) Manifest.MultiHashHexDigest {
+ var hasher = Manifest.Hash.init(.{});
+ // This hash is a tuple of:
+    // * whether it is relative to the global cache directory or to the root package
+ // * the relative file path from there to the build root of the package
+ hasher.update(if (pkg_root.root_dir.eql(cache_root))
+ &package_hash_prefix_cached
+ else
+ &package_hash_prefix_project);
+ hasher.update(pkg_root.sub_path);
+ return Manifest.hexDigest(hasher.finalResult());
+}
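+
+// For example (illustrative values): a dependency rooted at "libs/foo"
+// relative to the project hashes the tuple (project prefix, "libs/foo") into
+// a 68-character multihash hex digest: "1220" followed by 64 hex digits.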
+
+pub fn workerRun(f: *Fetch, prog_name: []const u8) void {
+ defer f.job_queue.wait_group.finish();
+
+ var prog_node = f.prog_node.start(prog_name, 0);
+ defer prog_node.end();
+ prog_node.activate();
+
+ run(f) catch |err| switch (err) {
+ error.OutOfMemory => f.oom_flag = true,
+ error.FetchFailed => {
+ // Nothing to do because the errors are already reported in `error_bundle`,
+ // and a reference is kept to the `Fetch` task inside `all_fetches`.
+ },
+ };
+}
+
+fn srcLoc(
+ f: *Fetch,
+ tok: std.zig.Ast.TokenIndex,
+) Allocator.Error!ErrorBundle.SourceLocationIndex {
+ const ast = f.parent_manifest_ast orelse return .none;
+ const eb = &f.error_bundle;
+ const token_starts = ast.tokens.items(.start);
+ const start_loc = ast.tokenLocation(0, tok);
+ const src_path = try eb.printString("{}" ++ Manifest.basename, .{f.parent_package_root});
+ const msg_off = 0;
+ return eb.addSourceLocation(.{
+ .src_path = src_path,
+ .span_start = token_starts[tok],
+ .span_end = @intCast(token_starts[tok] + ast.tokenSlice(tok).len),
+ .span_main = token_starts[tok] + msg_off,
+ .line = @intCast(start_loc.line),
+ .column = @intCast(start_loc.column),
+ .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
+ });
+}
+
+fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
+ const eb = &f.error_bundle;
+ try eb.addRootErrorMessage(.{
+ .msg = msg_str,
+ .src_loc = try f.srcLoc(msg_tok),
+ });
+ return error.FetchFailed;
+}
+
+const Resource = union(enum) {
+ file: fs.File,
+ http_request: std.http.Client.Request,
+ git: Git,
+ dir: fs.IterableDir,
+
+ const Git = struct {
+ fetch_stream: git.Session.FetchStream,
+ want_oid: [git.oid_length]u8,
+ };
+
+ fn deinit(resource: *Resource) void {
+ switch (resource.*) {
+ .file => |*file| file.close(),
+ .http_request => |*req| req.deinit(),
+ .git => |*git_resource| git_resource.fetch_stream.deinit(),
+ .dir => |*dir| dir.close(),
+ }
+ resource.* = undefined;
+ }
+
+ fn reader(resource: *Resource) std.io.AnyReader {
+ return .{
+ .context = resource,
+ .readFn = read,
+ };
+ }
+
+ fn read(context: *const anyopaque, buffer: []u8) anyerror!usize {
+ const resource: *Resource = @constCast(@ptrCast(@alignCast(context)));
+ switch (resource.*) {
+ .file => |*f| return f.read(buffer),
+ .http_request => |*r| return r.read(buffer),
+ .git => |*g| return g.fetch_stream.read(buffer),
+ .dir => unreachable,
+ }
+ }
+};
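+
+// Note: `read` recovers the concrete `Resource` from the type-erased context
+// pointer, so a single `std.io.AnyReader` serves the file, HTTP, and git
+// transports alike; `.dir` never reaches `read` because directories are
+// copied rather than streamed (see `recursiveDirectoryCopy`).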
+
+const FileType = enum {
+ tar,
+ @"tar.gz",
+ @"tar.xz",
+ git_pack,
+
+ fn fromPath(file_path: []const u8) ?FileType {
+ if (ascii.endsWithIgnoreCase(file_path, ".tar")) return .tar;
+ if (ascii.endsWithIgnoreCase(file_path, ".tar.gz")) return .@"tar.gz";
+ if (ascii.endsWithIgnoreCase(file_path, ".tar.xz")) return .@"tar.xz";
+ return null;
+ }
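+
+    // A small sanity check (illustrative): matching is case-insensitive and
+    // keyed on the path suffix alone.
+    test fromPath {
+        try std.testing.expectEqual(@as(?FileType, .tar), fromPath("foo.TAR"));
+        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromPath("foo.tar.gz"));
+        try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromPath("a/b/archive.TAR.XZ"));
+        try std.testing.expect(fromPath("foo.zip") == null);
+    }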
+
+ /// Parameter is a content-disposition header value.
+ fn fromContentDisposition(cd_header: []const u8) ?FileType {
+ const attach_end = ascii.indexOfIgnoreCase(cd_header, "attachment;") orelse
+ return null;
+
+ var value_start = ascii.indexOfIgnoreCasePos(cd_header, attach_end + 1, "filename") orelse
+ return null;
+        value_start += "filename".len;
+        if (value_start >= cd_header.len) return null;
+        if (cd_header[value_start] == '*') {
+            value_start += 1;
+        }
+        if (value_start >= cd_header.len or cd_header[value_start] != '=') return null;
+        value_start += 1;
+
+ var value_end = std.mem.indexOfPos(u8, cd_header, value_start, ";") orelse cd_header.len;
+ if (cd_header[value_end - 1] == '\"') {
+ value_end -= 1;
+ }
+ return fromPath(cd_header[value_start..value_end]);
+ }
+
+ test fromContentDisposition {
+ try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attaChment; FILENAME=\"stuff.tar.gz\"; size=42"));
+ try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attachment; filename*=\"stuff.tar.gz\""));
+ try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromContentDisposition("ATTACHMENT; filename=\"stuff.tar.xz\""));
+ try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromContentDisposition("attachment; FileName=\"stuff.tar.xz\""));
+ try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attachment; FileName*=UTF-8\'\'xyz%2Fstuff.tar.gz"));
+
+ try std.testing.expect(fromContentDisposition("attachment FileName=\"stuff.tar.gz\"") == null);
+ try std.testing.expect(fromContentDisposition("attachment; FileName=\"stuff.tar\"") == null);
+ try std.testing.expect(fromContentDisposition("attachment; FileName\"stuff.gz\"") == null);
+ try std.testing.expect(fromContentDisposition("attachment; size=42") == null);
+ try std.testing.expect(fromContentDisposition("inline; size=42") == null);
+ try std.testing.expect(fromContentDisposition("FileName=\"stuff.tar.gz\"; attachment;") == null);
+ try std.testing.expect(fromContentDisposition("FileName=\"stuff.tar.gz\";") == null);
+ }
+};
+
+fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
+ const gpa = f.arena.child_allocator;
+ const arena = f.arena.allocator();
+ const eb = &f.error_bundle;
+
+ if (ascii.eqlIgnoreCase(uri.scheme, "file")) return .{
+ .file = f.parent_package_root.openFile(uri.path, .{}) catch |err| {
+ return f.fail(f.location_tok, try eb.printString("unable to open '{}{s}': {s}", .{
+ f.parent_package_root, uri.path, @errorName(err),
+ }));
+ },
+ };
+
+ const http_client = f.job_queue.http_client;
+
+ if (ascii.eqlIgnoreCase(uri.scheme, "http") or
+ ascii.eqlIgnoreCase(uri.scheme, "https"))
+ {
+ var h = std.http.Headers{ .allocator = gpa };
+ defer h.deinit();
+
+ var req = http_client.request(.GET, uri, h, .{}) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to connect to server: {s}",
+ .{@errorName(err)},
+ ));
+ };
+ errdefer req.deinit(); // releases more than memory
+
+ req.start(.{}) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "HTTP request failed: {s}",
+ .{@errorName(err)},
+ ));
+ };
+ req.wait() catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "invalid HTTP response: {s}",
+ .{@errorName(err)},
+ ));
+ };
+
+ if (req.response.status != .ok) {
+ return f.fail(f.location_tok, try eb.printString(
+ "bad HTTP response code: '{d} {s}'",
+ .{ @intFromEnum(req.response.status), req.response.status.phrase() orelse "" },
+ ));
+ }
+
+ return .{ .http_request = req };
+ }
+
+ if (ascii.eqlIgnoreCase(uri.scheme, "git+http") or
+ ascii.eqlIgnoreCase(uri.scheme, "git+https"))
+ {
+ var transport_uri = uri;
+ transport_uri.scheme = uri.scheme["git+".len..];
+ var redirect_uri: []u8 = undefined;
+ var session: git.Session = .{ .transport = http_client, .uri = transport_uri };
+ session.discoverCapabilities(gpa, &redirect_uri) catch |err| switch (err) {
+ error.Redirected => {
+ defer gpa.free(redirect_uri);
+ return f.fail(f.location_tok, try eb.printString(
+ "repository moved to {s}",
+ .{redirect_uri},
+ ));
+ },
+ else => |e| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to discover remote git server capabilities: {s}",
+ .{@errorName(e)},
+ ));
+ },
+ };
+
+ const want_oid = want_oid: {
+ const want_ref = uri.fragment orelse "HEAD";
+ if (git.parseOid(want_ref)) |oid| break :want_oid oid else |_| {}
+
+ const want_ref_head = try std.fmt.allocPrint(arena, "refs/heads/{s}", .{want_ref});
+ const want_ref_tag = try std.fmt.allocPrint(arena, "refs/tags/{s}", .{want_ref});
+
+ var ref_iterator = session.listRefs(gpa, .{
+ .ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
+ .include_peeled = true,
+ }) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to list refs: {s}",
+ .{@errorName(err)},
+ ));
+ };
+ defer ref_iterator.deinit();
+ while (ref_iterator.next() catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to iterate refs: {s}",
+ .{@errorName(err)},
+ ));
+ }) |ref| {
+ if (std.mem.eql(u8, ref.name, want_ref) or
+ std.mem.eql(u8, ref.name, want_ref_head) or
+ std.mem.eql(u8, ref.name, want_ref_tag))
+ {
+ break :want_oid ref.peeled orelse ref.oid;
+ }
+ }
+ return f.fail(f.location_tok, try eb.printString("ref not found: {s}", .{want_ref}));
+ };
+ if (uri.fragment == null) {
+ const notes_len = 1;
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString("url field is missing an explicit ref"),
+ .src_loc = try f.srcLoc(f.location_tok),
+ .notes_len = notes_len,
+ });
+ const notes_start = try eb.reserveNotes(notes_len);
+ eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("try .url = \"{+/}#{}\",", .{
+ uri, std.fmt.fmtSliceHexLower(&want_oid),
+ }),
+ }));
+ return error.FetchFailed;
+ }
+
+ var want_oid_buf: [git.fmt_oid_length]u8 = undefined;
+ _ = std.fmt.bufPrint(&want_oid_buf, "{}", .{
+ std.fmt.fmtSliceHexLower(&want_oid),
+ }) catch unreachable;
+ var fetch_stream = session.fetch(gpa, &.{&want_oid_buf}) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to create fetch stream: {s}",
+ .{@errorName(err)},
+ ));
+ };
+ errdefer fetch_stream.deinit();
+
+ return .{ .git = .{
+ .fetch_stream = fetch_stream,
+ .want_oid = want_oid,
+ } };
+ }
+
+ return f.fail(f.location_tok, try eb.printString(
+ "unsupported URL scheme: {s}",
+ .{uri.scheme},
+ ));
+}
+
+fn unpackResource(
+ f: *Fetch,
+ resource: *Resource,
+ uri_path: []const u8,
+ tmp_directory: Cache.Directory,
+) RunError!void {
+ const eb = &f.error_bundle;
+ const file_type = switch (resource.*) {
+ .file => FileType.fromPath(uri_path) orelse
+ return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path})),
+
+ .http_request => |req| ft: {
+            // The Content-Type header takes highest precedence.
+ const content_type = req.response.headers.getFirstValue("Content-Type") orelse
+ return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
+
+ if (ascii.eqlIgnoreCase(content_type, "application/x-tar"))
+ break :ft .tar;
+
+ if (ascii.eqlIgnoreCase(content_type, "application/gzip") or
+ ascii.eqlIgnoreCase(content_type, "application/x-gzip") or
+ ascii.eqlIgnoreCase(content_type, "application/tar+gzip"))
+ {
+ break :ft .@"tar.gz";
+ }
+
+ if (ascii.eqlIgnoreCase(content_type, "application/x-xz"))
+ break :ft .@"tar.xz";
+
+ if (!ascii.eqlIgnoreCase(content_type, "application/octet-stream")) {
+ return f.fail(f.location_tok, try eb.printString(
+ "unrecognized 'Content-Type' header: '{s}'",
+ .{content_type},
+ ));
+ }
+
+ // Next, the filename from 'content-disposition: attachment' takes precedence.
+ if (req.response.headers.getFirstValue("Content-Disposition")) |cd_header| {
+ break :ft FileType.fromContentDisposition(cd_header) orelse {
+ return f.fail(f.location_tok, try eb.printString(
+ "unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
+ .{cd_header},
+ ));
+ };
+ }
+
+ // Finally, the path from the URI is used.
+ break :ft FileType.fromPath(uri_path) orelse {
+ return f.fail(f.location_tok, try eb.printString(
+ "unknown file type: '{s}'",
+ .{uri_path},
+ ));
+ };
+ },
+
+ .git => .git_pack,
+
+ .dir => |dir| return f.recursiveDirectoryCopy(dir, tmp_directory.handle) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to copy directory '{s}': {s}",
+ .{ uri_path, @errorName(err) },
+ ));
+ },
+ };
+
+ switch (file_type) {
+ .tar => try unpackTarball(f, tmp_directory.handle, resource.reader()),
+ .@"tar.gz" => try unpackTarballCompressed(f, tmp_directory.handle, resource, std.compress.gzip),
+ .@"tar.xz" => try unpackTarballCompressed(f, tmp_directory.handle, resource, std.compress.xz),
+ .git_pack => unpackGitPack(f, tmp_directory.handle, resource) catch |err| switch (err) {
+ error.FetchFailed => return error.FetchFailed,
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return f.fail(f.location_tok, try eb.printString(
+ "unable to unpack git files: {s}",
+ .{@errorName(e)},
+ )),
+ },
+ }
+}
+
+fn unpackTarballCompressed(
+ f: *Fetch,
+ out_dir: fs.Dir,
+ resource: *Resource,
+ comptime Compression: type,
+) RunError!void {
+ const gpa = f.arena.child_allocator;
+ const eb = &f.error_bundle;
+ const reader = resource.reader();
+ var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
+
+ var decompress = Compression.decompress(gpa, br.reader()) catch |err| {
+ return f.fail(f.location_tok, try eb.printString(
+ "unable to decompress tarball: {s}",
+ .{@errorName(err)},
+ ));
+ };
+ defer decompress.deinit();
+
+ return unpackTarball(f, out_dir, decompress.reader());
+}
+
+fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!void {
+ const eb = &f.error_bundle;
+ const gpa = f.arena.child_allocator;
+
+ var diagnostics: std.tar.Options.Diagnostics = .{ .allocator = gpa };
+ defer diagnostics.deinit();
+
+ std.tar.pipeToFileSystem(out_dir, reader, .{
+ .diagnostics = &diagnostics,
+ .strip_components = 1,
+ // TODO: we would like to set this to executable_bit_only, but two
+ // things need to happen before that:
+ // 1. the tar implementation needs to support it
+ // 2. the hashing algorithm here needs to support detecting the is_executable
+ // bit on Windows from the ACLs (see the isExecutable function).
+ .mode_mode = .ignore,
+ .exclude_empty_directories = true,
+ }) catch |err| return f.fail(f.location_tok, try eb.printString(
+ "unable to unpack tarball to temporary directory: {s}",
+ .{@errorName(err)},
+ ));
+
+ if (diagnostics.errors.items.len > 0) {
+ const notes_len: u32 = @intCast(diagnostics.errors.items.len);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString("unable to unpack tarball"),
+ .src_loc = try f.srcLoc(f.location_tok),
+ .notes_len = notes_len,
+ });
+ const notes_start = try eb.reserveNotes(notes_len);
+ for (diagnostics.errors.items, notes_start..) |item, note_i| {
+ switch (item) {
+ .unable_to_create_sym_link => |info| {
+ eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("unable to create symlink from '{s}' to '{s}': {s}", .{
+ info.file_name, info.link_name, @errorName(info.code),
+ }),
+ }));
+ },
+ .unable_to_create_file => |info| {
+ eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("unable to create file '{s}': {s}", .{
+ info.file_name, @errorName(info.code),
+ }),
+ }));
+ },
+ .unsupported_file_type => |info| {
+ eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("file '{s}' has unsupported type '{c}'", .{
+ info.file_name, @intFromEnum(info.file_type),
+ }),
+ }));
+ },
+ }
+ }
+ return error.FetchFailed;
+ }
+}
+
+fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!void {
+ const eb = &f.error_bundle;
+ const gpa = f.arena.child_allocator;
+ const want_oid = resource.git.want_oid;
+ const reader = resource.git.fetch_stream.reader();
+ // The .git directory is used to store the packfile and associated index, but
+ // we do not attempt to replicate the exact structure of a real .git
+ // directory, since that isn't relevant for fetching a package.
+ {
+ var pack_dir = try out_dir.makeOpenPath(".git", .{});
+ defer pack_dir.close();
+ var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
+ defer pack_file.close();
+ var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
+ try fifo.pump(reader, pack_file.writer());
+ try pack_file.sync();
+
+ var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
+ defer index_file.close();
+ {
+ var index_prog_node = f.prog_node.start("Index pack", 0);
+ defer index_prog_node.end();
+ index_prog_node.activate();
+ var index_buffered_writer = std.io.bufferedWriter(index_file.writer());
+ try git.indexPack(gpa, pack_file, index_buffered_writer.writer());
+ try index_buffered_writer.flush();
+ try index_file.sync();
+ }
+
+ {
+ var checkout_prog_node = f.prog_node.start("Checkout", 0);
+ defer checkout_prog_node.end();
+ checkout_prog_node.activate();
+ var repository = try git.Repository.init(gpa, pack_file, index_file);
+ defer repository.deinit();
+ var diagnostics: git.Diagnostics = .{ .allocator = gpa };
+ defer diagnostics.deinit();
+ try repository.checkout(out_dir, want_oid, &diagnostics);
+
+ if (diagnostics.errors.items.len > 0) {
+ const notes_len: u32 = @intCast(diagnostics.errors.items.len);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString("unable to unpack packfile"),
+ .src_loc = try f.srcLoc(f.location_tok),
+ .notes_len = notes_len,
+ });
+ const notes_start = try eb.reserveNotes(notes_len);
+ for (diagnostics.errors.items, notes_start..) |item, note_i| {
+ switch (item) {
+ .unable_to_create_sym_link => |info| {
+ eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
+ .msg = try eb.printString("unable to create symlink from '{s}' to '{s}': {s}", .{
+ info.file_name, info.link_name, @errorName(info.code),
+ }),
+ }));
+ },
+ }
+ }
+ return error.InvalidGitPack;
+ }
+ }
+ }
+
+ try out_dir.deleteTree(".git");
+}
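+
+// The unpacked tree therefore contains only the checked-out files: the
+// temporary .git directory holding pkg.pack and pkg.idx is removed before
+// the package contents are hashed.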
+
+fn recursiveDirectoryCopy(f: *Fetch, dir: fs.IterableDir, tmp_dir: fs.Dir) anyerror!void {
+ const gpa = f.arena.child_allocator;
+ // Recursive directory copy.
+ var it = try dir.walk(gpa);
+ defer it.deinit();
+ while (try it.next()) |entry| {
+ switch (entry.kind) {
+ .directory => {}, // omit empty directories
+ .file => {
+ dir.dir.copyFile(
+ entry.path,
+ tmp_dir,
+ entry.path,
+ .{},
+ ) catch |err| switch (err) {
+ error.FileNotFound => {
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
+ try dir.dir.copyFile(entry.path, tmp_dir, entry.path, .{});
+ },
+ else => |e| return e,
+ };
+ },
+ .sym_link => {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const link_name = try dir.dir.readLink(entry.path, &buf);
+ // TODO: if this would create a symlink to outside
+ // the destination directory, fail with an error instead.
+ tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) {
+ error.FileNotFound => {
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
+ try tmp_dir.symLink(link_name, entry.path, .{});
+ },
+ else => |e| return e,
+ };
+ },
+ else => return error.IllegalFileTypeInPackage,
+ }
+ }
+}
+
+pub fn renameTmpIntoCache(
+ cache_dir: fs.Dir,
+ tmp_dir_sub_path: []const u8,
+ dest_dir_sub_path: []const u8,
+) !void {
+ assert(dest_dir_sub_path[1] == fs.path.sep);
+ var handled_missing_dir = false;
+ while (true) {
+ cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
+ error.FileNotFound => {
+ if (handled_missing_dir) return err;
+ cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
+ error.PathAlreadyExists => handled_missing_dir = true,
+ else => |e| return e,
+ };
+ continue;
+ },
+ error.PathAlreadyExists, error.AccessDenied => {
+ // Package has been already downloaded and may already be in use on the system.
+ cache_dir.deleteTree(tmp_dir_sub_path) catch {
+                // Garbage files left over in zig-cache/tmp/ are, as they say
+                // on Star Trek, "operating within normal parameters".
+ };
+ },
+ else => |e| return e,
+ };
+ break;
+ }
+}
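+
+// Example call shape (hypothetical values): renameTmpIntoCache(cache_dir,
+// "tmp/9j8z...", "p/1220ab...") moves the fully unpacked tree into place,
+// creating the one-character "p" parent directory on first use.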
+
+/// Assumes that files not included in the package have already been filtered
+/// prior to calling this function. This ensures that files not protected by
+/// the hash are not present on the file system. Empty directories are *not
+/// hashed* and must not be present on the file system when calling this
+/// function.
+fn computeHash(
+ f: *Fetch,
+ tmp_directory: Cache.Directory,
+ filter: Filter,
+) RunError!Manifest.Digest {
+ // All the path name strings need to be in memory for sorting.
+ const arena = f.arena.allocator();
+ const gpa = f.arena.child_allocator;
+ const eb = &f.error_bundle;
+ const thread_pool = f.job_queue.thread_pool;
+
+ // Collect all files, recursively, then sort.
+ var all_files = std.ArrayList(*HashedFile).init(gpa);
+ defer all_files.deinit();
+
+ var deleted_files = std.ArrayList(*DeletedFile).init(gpa);
+ defer deleted_files.deinit();
+
+ // Track directories which had any files deleted from them so that empty directories
+ // can be deleted.
+ var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .{};
+ defer sus_dirs.deinit(gpa);
+
+ var walker = try @as(fs.IterableDir, .{ .dir = tmp_directory.handle }).walk(gpa);
+ defer walker.deinit();
+
+ {
+ // The final hash will be a hash of each file hashed independently. This
+ // allows hashing in parallel.
+ var wait_group: WaitGroup = .{};
+        // `computeHash` is called from a worker thread, so it must not wait
+        // without also doing work, or a deadlock could occur.
+ defer thread_pool.waitAndWork(&wait_group);
+
+ while (walker.next() catch |err| {
+ try eb.addRootErrorMessage(.{ .msg = try eb.printString(
+ "unable to walk temporary directory '{}': {s}",
+ .{ tmp_directory, @errorName(err) },
+ ) });
+ return error.FetchFailed;
+ }) |entry| {
+ if (entry.kind == .directory) continue;
+
+ if (!filter.includePath(entry.path)) {
+ // Delete instead of including in hash calculation.
+ const fs_path = try arena.dupe(u8, entry.path);
+
+ // Also track the parent directory in case it becomes empty.
+ if (fs.path.dirname(fs_path)) |parent|
+ try sus_dirs.put(gpa, parent, {});
+
+ const deleted_file = try arena.create(DeletedFile);
+ deleted_file.* = .{
+ .fs_path = fs_path,
+ .failure = undefined, // to be populated by the worker
+ };
+ wait_group.start();
+ try thread_pool.spawn(workerDeleteFile, .{
+ tmp_directory.handle, deleted_file, &wait_group,
+ });
+ try deleted_files.append(deleted_file);
+ continue;
+ }
+
+ const kind: HashedFile.Kind = switch (entry.kind) {
+ .directory => unreachable,
+ .file => .file,
+ .sym_link => .sym_link,
+ else => return f.fail(f.location_tok, try eb.printString(
+ "package contains '{s}' which has illegal file type '{s}'",
+ .{ entry.path, @tagName(entry.kind) },
+ )),
+ };
+
+ if (std.mem.eql(u8, entry.path, Package.build_zig_basename))
+ f.has_build_zig = true;
+
+ const fs_path = try arena.dupe(u8, entry.path);
+ const hashed_file = try arena.create(HashedFile);
+ hashed_file.* = .{
+ .fs_path = fs_path,
+ .normalized_path = try normalizePath(arena, fs_path),
+ .kind = kind,
+ .hash = undefined, // to be populated by the worker
+ .failure = undefined, // to be populated by the worker
+ };
+ wait_group.start();
+ try thread_pool.spawn(workerHashFile, .{
+ tmp_directory.handle, hashed_file, &wait_group,
+ });
+ try all_files.append(hashed_file);
+ }
+ }
+
+ {
+ // Sort by length, descending, so that child directories get removed first.
+ sus_dirs.sortUnstable(@as(struct {
+ keys: []const []const u8,
+ pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
+ return ctx.keys[b_index].len < ctx.keys[a_index].len;
+ }
+ }, .{ .keys = sus_dirs.keys() }));
+
+ // During this loop, more entries will be added, so we must loop by index.
+ var i: usize = 0;
+ while (i < sus_dirs.count()) : (i += 1) {
+ const sus_dir = sus_dirs.keys()[i];
+ tmp_directory.handle.deleteDir(sus_dir) catch |err| switch (err) {
+ error.DirNotEmpty => continue,
+ error.FileNotFound => continue,
+ else => |e| {
+ try eb.addRootErrorMessage(.{ .msg = try eb.printString(
+ "unable to delete empty directory '{s}': {s}",
+ .{ sus_dir, @errorName(e) },
+ ) });
+ return error.FetchFailed;
+ },
+ };
+ if (fs.path.dirname(sus_dir)) |parent| {
+ try sus_dirs.put(gpa, parent, {});
+ }
+ }
+ }
+
+ std.mem.sortUnstable(*HashedFile, all_files.items, {}, HashedFile.lessThan);
+
+ var hasher = Manifest.Hash.init(.{});
+ var any_failures = false;
+ for (all_files.items) |hashed_file| {
+ hashed_file.failure catch |err| {
+ any_failures = true;
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("unable to hash '{s}': {s}", .{
+ hashed_file.fs_path, @errorName(err),
+ }),
+ });
+ };
+ hasher.update(&hashed_file.hash);
+ }
+ for (deleted_files.items) |deleted_file| {
+ deleted_file.failure catch |err| {
+ any_failures = true;
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.printString("failed to delete excluded path '{s}' from package: {s}", .{
+ deleted_file.fs_path, @errorName(err),
+ }),
+ });
+ };
+ }
+
+ if (any_failures) return error.FetchFailed;
+ return hasher.finalResult();
+}
+
+fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
+ defer wg.finish();
+ hashed_file.failure = hashFileFallible(dir, hashed_file);
+}
+
+fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile, wg: *WaitGroup) void {
+ defer wg.finish();
+ deleted_file.failure = deleteFileFallible(dir, deleted_file);
+}
+
+fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
+ var buf: [8000]u8 = undefined;
+ var hasher = Manifest.Hash.init(.{});
+ hasher.update(hashed_file.normalized_path);
+ switch (hashed_file.kind) {
+ .file => {
+ var file = try dir.openFile(hashed_file.fs_path, .{});
+ defer file.close();
+ hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });
+ while (true) {
+ const bytes_read = try file.read(&buf);
+ if (bytes_read == 0) break;
+ hasher.update(buf[0..bytes_read]);
+ }
+ },
+ .sym_link => {
+ const link_name = try dir.readLink(hashed_file.fs_path, &buf);
+ hasher.update(link_name);
+ },
+ }
+ hasher.final(&hashed_file.hash);
+}
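+
+// So, for a regular file (illustrative), the per-file hash input is: the
+// normalized path bytes, then { 0, executable_bit }, then the raw file
+// contents; a symlink instead hashes its link target after the path.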
+
+fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
+ try dir.deleteFile(deleted_file.fs_path);
+}
+
+fn isExecutable(file: fs.File) !bool {
+ if (builtin.os.tag == .windows) {
+ // TODO check the ACL on Windows.
+ // Until this is implemented, this could be a false negative on
+ // Windows, which is why we do not yet set executable_bit_only above
+ // when unpacking the tarball.
+ return false;
+ } else {
+ const stat = try file.stat();
+ return (stat.mode & std.os.S.IXUSR) != 0;
+ }
+}
+
+const DeletedFile = struct {
+ fs_path: []const u8,
+ failure: Error!void,
+
+ const Error =
+ fs.Dir.DeleteFileError ||
+ fs.Dir.DeleteDirError;
+};
+
+const HashedFile = struct {
+ fs_path: []const u8,
+ normalized_path: []const u8,
+ hash: Manifest.Digest,
+ failure: Error!void,
+ kind: Kind,
+
+ const Error =
+ fs.File.OpenError ||
+ fs.File.ReadError ||
+ fs.File.StatError ||
+ fs.Dir.ReadLinkError;
+
+ const Kind = enum { file, sym_link };
+
+ fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
+ _ = context;
+ return std.mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
+ }
+};
+
+/// Normalize a file system path so that it is identical across operating systems.
+/// This converts backslashes into forward slashes.
+fn normalizePath(arena: Allocator, fs_path: []const u8) ![]const u8 {
+ const canonical_sep = '/';
+
+ if (fs.path.sep == canonical_sep)
+ return fs_path;
+
+ const normalized = try arena.dupe(u8, fs_path);
+ for (normalized) |*byte| {
+ switch (byte.*) {
+ fs.path.sep => byte.* = canonical_sep,
+ else => continue,
+ }
+ }
+ return normalized;
+}
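+
+// A minimal check of the contract above; `sep_str` keeps the expectation
+// valid on both POSIX and Windows hosts.
+test normalizePath {
+    var arena_instance = std.heap.ArenaAllocator.init(std.testing.allocator);
+    defer arena_instance.deinit();
+    const input = "foo" ++ fs.path.sep_str ++ "bar.zig";
+    const normalized = try normalizePath(arena_instance.allocator(), input);
+    try std.testing.expectEqualStrings("foo/bar.zig", normalized);
+}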
+
+const Filter = struct {
+ include_paths: std.StringArrayHashMapUnmanaged(void) = .{},
+
+ /// sub_path is relative to the package root.
+ pub fn includePath(self: Filter, sub_path: []const u8) bool {
+ if (self.include_paths.count() == 0) return true;
+ if (self.include_paths.contains("")) return true;
+ if (self.include_paths.contains(sub_path)) return true;
+
+ // Check if any included paths are parent directories of sub_path.
+ var dirname = sub_path;
+ while (std.fs.path.dirname(dirname)) |next_dirname| {
+            if (self.include_paths.contains(next_dirname)) return true;
+ dirname = next_dirname;
+ }
+
+ return false;
+ }
+};
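+
+// An illustrative sketch of the semantics above: including a directory
+// admits every file beneath it, and unlisted files are excluded.
+test Filter {
+    const gpa = std.testing.allocator;
+    var filter: Filter = .{};
+    defer filter.include_paths.deinit(gpa);
+    try filter.include_paths.put(gpa, "src", {});
+    try std.testing.expect(filter.includePath("src/main.zig"));
+    try std.testing.expect(!filter.includePath("README.md"));
+}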
+
+pub fn depDigest(
+ pkg_root: Package.Path,
+ cache_root: Cache.Directory,
+ dep: Manifest.Dependency,
+) ?Manifest.MultiHashHexDigest {
+ if (dep.hash) |h| return h[0..Manifest.multihash_hex_digest_len].*;
+
+ switch (dep.location) {
+ .url => return null,
+ .path => |rel_path| {
+ var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
+ var fba = std.heap.FixedBufferAllocator.init(&buf);
+ const new_root = pkg_root.resolvePosix(fba.allocator(), rel_path) catch
+ return null;
+ return relativePathDigest(new_root, cache_root);
+ },
+ }
+}
+
+// These are random bytes.
+const package_hash_prefix_cached = [8]u8{ 0x53, 0x7e, 0xfa, 0x94, 0x65, 0xe9, 0xf8, 0x73 };
+const package_hash_prefix_project = [8]u8{ 0xe1, 0x25, 0xee, 0xfa, 0xa6, 0x17, 0x38, 0xcc };
+
+const builtin = @import("builtin");
+const std = @import("std");
+const fs = std.fs;
+const assert = std.debug.assert;
+const ascii = std.ascii;
+const Allocator = std.mem.Allocator;
+const Cache = std.Build.Cache;
+const ThreadPool = std.Thread.Pool;
+const WaitGroup = std.Thread.WaitGroup;
+const Fetch = @This();
+const main = @import("../main.zig");
+const git = @import("Fetch/git.zig");
+const Package = @import("../Package.zig");
+const Manifest = Package.Manifest;
+const ErrorBundle = std.zig.ErrorBundle;
diff --git a/src/git.zig b/src/Package/Fetch/git.zig
index fd35c5c33e..af4317702d 100644
--- a/src/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -11,8 +11,6 @@ const Allocator = mem.Allocator;
const Sha1 = std.crypto.hash.Sha1;
const assert = std.debug.assert;
-const ProgressReader = @import("Package.zig").ProgressReader;
-
pub const oid_length = Sha1.digest_length;
pub const fmt_oid_length = 2 * oid_length;
/// The ID of a Git object (an SHA-1 hash).
diff --git a/src/git/testdata/testrepo.idx b/src/Package/Fetch/git/testdata/testrepo.idx
index fdaba5ed48..fdaba5ed48 100644
--- a/src/git/testdata/testrepo.idx
+++ b/src/Package/Fetch/git/testdata/testrepo.idx
Binary files differ
diff --git a/src/git/testdata/testrepo.pack b/src/Package/Fetch/git/testdata/testrepo.pack
index 1b2f5dd650..1b2f5dd650 100644
--- a/src/git/testdata/testrepo.pack
+++ b/src/Package/Fetch/git/testdata/testrepo.pack
Binary files differ
diff --git a/src/Manifest.zig b/src/Package/Manifest.zig
index 2ff54e6132..c1b1cdfb4f 100644
--- a/src/Manifest.zig
+++ b/src/Package/Manifest.zig
@@ -1,14 +1,21 @@
+pub const max_bytes = 10 * 1024 * 1024;
pub const basename = "build.zig.zon";
pub const Hash = std.crypto.hash.sha2.Sha256;
+pub const Digest = [Hash.digest_length]u8;
+pub const multihash_len = 1 + 1 + Hash.digest_length;
+pub const multihash_hex_digest_len = 2 * multihash_len;
+pub const MultiHashHexDigest = [multihash_hex_digest_len]u8;
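+// e.g. with sha2-256 as the multihash function (code 0x12, length 0x20), a
+// full digest renders as "1220" followed by the 64 hex digits of the hash.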
pub const Dependency = struct {
- location: union(enum) {
- url: []const u8,
- path: []const u8,
- },
+ location: Location,
location_tok: Ast.TokenIndex,
hash: ?[]const u8,
hash_tok: Ast.TokenIndex,
+
+ pub const Location = union(enum) {
+ url: []const u8,
+ path: []const u8,
+ };
};
pub const ErrorMessage = struct {
@@ -45,18 +52,22 @@ comptime {
assert(@intFromEnum(multihash_function) < 127);
assert(Hash.digest_length < 127);
}
-pub const multihash_len = 1 + 1 + Hash.digest_length;
name: []const u8,
version: std.SemanticVersion,
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+paths: std.StringArrayHashMapUnmanaged(void),
errors: []ErrorMessage,
arena_state: std.heap.ArenaAllocator.State,
+pub const ParseOptions = struct {
+ allow_missing_paths_field: bool = false,
+};
+
pub const Error = Allocator.Error;
-pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
+pub fn parse(gpa: Allocator, ast: std.zig.Ast, options: ParseOptions) Error!Manifest {
const node_tags = ast.nodes.items(.tag);
const node_datas = ast.nodes.items(.data);
assert(node_tags[0] == .root);
@@ -74,11 +85,14 @@ pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
.name = undefined,
.version = undefined,
.dependencies = .{},
+ .paths = .{},
+ .allow_missing_paths_field = options.allow_missing_paths_field,
.buf = .{},
};
defer p.buf.deinit(gpa);
defer p.errors.deinit(gpa);
defer p.dependencies.deinit(gpa);
+ defer p.paths.deinit(gpa);
p.parseRoot(main_node_index) catch |err| switch (err) {
error.ParseFailure => assert(p.errors.items.len > 0),
@@ -89,6 +103,7 @@ pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
.name = p.name,
.version = p.version,
.dependencies = try p.dependencies.clone(p.arena),
+ .paths = try p.paths.clone(p.arena),
.errors = try p.arena.dupe(ErrorMessage, p.errors.items),
.arena_state = arena_instance.state,
};
@@ -117,8 +132,8 @@ test hex64 {
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
}
-pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
- var result: [multihash_len * 2]u8 = undefined;
+pub fn hexDigest(digest: Digest) MultiHashHexDigest {
+ var result: MultiHashHexDigest = undefined;
result[0] = hex_charset[@intFromEnum(multihash_function) >> 4];
result[1] = hex_charset[@intFromEnum(multihash_function) & 15];
@@ -143,6 +158,8 @@ const Parse = struct {
name: []const u8,
version: std.SemanticVersion,
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+ paths: std.StringArrayHashMapUnmanaged(void),
+ allow_missing_paths_field: bool,
const InnerError = error{ ParseFailure, OutOfMemory };
@@ -158,6 +175,7 @@ const Parse = struct {
var have_name = false;
var have_version = false;
+ var have_included_paths = false;
for (struct_init.ast.fields) |field_init| {
const name_token = ast.firstToken(field_init) - 2;
@@ -167,6 +185,9 @@ const Parse = struct {
// that is desirable on a per-field basis.
if (mem.eql(u8, field_name, "dependencies")) {
try parseDependencies(p, field_init);
+ } else if (mem.eql(u8, field_name, "paths")) {
+ have_included_paths = true;
+ try parseIncludedPaths(p, field_init);
} else if (mem.eql(u8, field_name, "name")) {
p.name = try parseString(p, field_init);
have_name = true;
@@ -190,6 +211,14 @@ const Parse = struct {
if (!have_version) {
try appendError(p, main_token, "missing top-level 'version' field", .{});
}
+
+ if (!have_included_paths) {
+ if (p.allow_missing_paths_field) {
+ try p.paths.put(p.gpa, "", {});
+ } else {
+ try appendError(p, main_token, "missing top-level 'paths' field", .{});
+ }
+ }
}
fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
@@ -222,9 +251,9 @@ const Parse = struct {
var dep: Dependency = .{
.location = undefined,
- .location_tok = undefined,
+ .location_tok = 0,
.hash = null,
- .hash_tok = undefined,
+ .hash_tok = 0,
};
var has_location = false;
@@ -277,6 +306,25 @@ const Parse = struct {
return dep;
}
+ fn parseIncludedPaths(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const array_init = ast.fullArrayInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected paths expression to be a struct", .{});
+ };
+
+ for (array_init.ast.elements) |elem_node| {
+ const path_string = try parseString(p, elem_node);
+ // This is normalized so that it can be used in string comparisons
+ // against file system paths.
+ const normalized = try std.fs.path.resolve(p.arena, &.{path_string});
+ try p.paths.put(p.gpa, normalized, {});
+ }
+ }
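+
+    // An illustrative build.zig.zon manifest exercising this field (names and
+    // values hypothetical):
+    //
+    //     .{
+    //         .name = "foo",
+    //         .version = "0.1.0",
+    //         .paths = .{ "build.zig", "build.zig.zon", "src" },
+    //     }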
+
fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
const ast = p.ast;
const node_tags = ast.nodes.items(.tag);
@@ -309,10 +357,9 @@ const Parse = struct {
}
}
- const hex_multihash_len = 2 * Manifest.multihash_len;
- if (h.len != hex_multihash_len) {
+ if (h.len != multihash_hex_digest_len) {
return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
- hex_multihash_len, h.len,
+ multihash_hex_digest_len, h.len,
});
}
diff --git a/src/Package/Module.zig b/src/Package/Module.zig
new file mode 100644
index 0000000000..7e6b518892
--- /dev/null
+++ b/src/Package/Module.zig
@@ -0,0 +1,34 @@
+//! Corresponds to something that Zig source code can `@import`.
+//! Not to be confused with src/Module.zig which should be renamed
+//! to something else. https://github.com/ziglang/zig/issues/14307
+
+/// Only files inside this directory can be imported.
+root: Package.Path,
+/// Relative to `root`. May contain path separators.
+root_src_path: []const u8,
+/// Name used in compile errors. Looks like "root.foo.bar".
+fully_qualified_name: []const u8,
+/// The dependency table of this module. Shared dependencies such as 'std',
+/// 'builtin', and 'root' are not specified in every dependency table, but
+/// instead only in the table of `main_mod`. `Module.importFile` is
+/// responsible for detecting these names and using the correct package.
+deps: Deps = .{},
+
+pub const Deps = std.StringHashMapUnmanaged(*Module);
+
+pub const Tree = struct {
+ /// Each `Package` exposes a `Module` with build.zig as its root source file.
+ build_module_table: std.AutoArrayHashMapUnmanaged(MultiHashHexDigest, *Module),
+};
+
+pub fn create(allocator: Allocator, m: Module) Allocator.Error!*Module {
+ const new = try allocator.create(Module);
+ new.* = m;
+ return new;
+}
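+
+// Illustrative use (names hypothetical): the compiler constructs the root
+// module roughly like this:
+//
+//     const mod = try Module.create(arena, .{
+//         .root = .{ .root_dir = build_root_dir, .sub_path = "" },
+//         .root_src_path = "src/main.zig",
+//         .fully_qualified_name = "root",
+//     });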
+
+const Module = @This();
+const Package = @import("../Package.zig");
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const MultiHashHexDigest = Package.Manifest.MultiHashHexDigest;
diff --git a/src/Package/hash.zig b/src/Package/hash.zig
deleted file mode 100644
index b14ec70244..0000000000
--- a/src/Package/hash.zig
+++ /dev/null
@@ -1,153 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("std");
-const fs = std.fs;
-const ThreadPool = std.Thread.Pool;
-const WaitGroup = std.Thread.WaitGroup;
-const Allocator = std.mem.Allocator;
-
-const Hash = @import("../Manifest.zig").Hash;
-
-pub fn compute(thread_pool: *ThreadPool, pkg_dir: fs.IterableDir) ![Hash.digest_length]u8 {
- const gpa = thread_pool.allocator;
-
- // We'll use an arena allocator for the path name strings since they all
- // need to be in memory for sorting.
- var arena_instance = std.heap.ArenaAllocator.init(gpa);
- defer arena_instance.deinit();
- const arena = arena_instance.allocator();
-
- // TODO: delete files not included in the package prior to computing the package hash.
- // for example, if the ini file has directives to include/not include certain files,
- // apply those rules directly to the filesystem right here. This ensures that files
- // not protected by the hash are not present on the file system.
-
- // Collect all files, recursively, then sort.
- var all_files = std.ArrayList(*HashedFile).init(gpa);
- defer all_files.deinit();
-
- var walker = try pkg_dir.walk(gpa);
- defer walker.deinit();
-
- {
- // The final hash will be a hash of each file hashed independently. This
- // allows hashing in parallel.
- var wait_group: WaitGroup = .{};
- defer wait_group.wait();
-
- while (try walker.next()) |entry| {
- const kind: HashedFile.Kind = switch (entry.kind) {
- .directory => continue,
- .file => .file,
- .sym_link => .sym_link,
- else => return error.IllegalFileTypeInPackage,
- };
- const hashed_file = try arena.create(HashedFile);
- const fs_path = try arena.dupe(u8, entry.path);
- hashed_file.* = .{
- .fs_path = fs_path,
- .normalized_path = try normalizePath(arena, fs_path),
- .kind = kind,
- .hash = undefined, // to be populated by the worker
- .failure = undefined, // to be populated by the worker
- };
- wait_group.start();
- try thread_pool.spawn(workerHashFile, .{ pkg_dir.dir, hashed_file, &wait_group });
-
- try all_files.append(hashed_file);
- }
- }
-
- std.mem.sortUnstable(*HashedFile, all_files.items, {}, HashedFile.lessThan);
-
- var hasher = Hash.init(.{});
- var any_failures = false;
- for (all_files.items) |hashed_file| {
- hashed_file.failure catch |err| {
- any_failures = true;
- std.log.err("unable to hash '{s}': {s}", .{ hashed_file.fs_path, @errorName(err) });
- };
- hasher.update(&hashed_file.hash);
- }
- if (any_failures) return error.PackageHashUnavailable;
- return hasher.finalResult();
-}
-
-const HashedFile = struct {
- fs_path: []const u8,
- normalized_path: []const u8,
- hash: [Hash.digest_length]u8,
- failure: Error!void,
- kind: Kind,
-
- const Error =
- fs.File.OpenError ||
- fs.File.ReadError ||
- fs.File.StatError ||
- fs.Dir.ReadLinkError;
-
- const Kind = enum { file, sym_link };
-
- fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
- _ = context;
- return std.mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
- }
-};
-
-/// Make a file system path identical independently of operating system path inconsistencies.
-/// This converts backslashes into forward slashes.
-fn normalizePath(arena: Allocator, fs_path: []const u8) ![]const u8 {
- const canonical_sep = '/';
-
- if (fs.path.sep == canonical_sep)
- return fs_path;
-
- const normalized = try arena.dupe(u8, fs_path);
- for (normalized) |*byte| {
- switch (byte.*) {
- fs.path.sep => byte.* = canonical_sep,
- else => continue,
- }
- }
- return normalized;
-}
-
-fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
- defer wg.finish();
- hashed_file.failure = hashFileFallible(dir, hashed_file);
-}
-
-fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
- var buf: [8000]u8 = undefined;
- var hasher = Hash.init(.{});
- hasher.update(hashed_file.normalized_path);
- switch (hashed_file.kind) {
- .file => {
- var file = try dir.openFile(hashed_file.fs_path, .{});
- defer file.close();
- hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });
- while (true) {
- const bytes_read = try file.read(&buf);
- if (bytes_read == 0) break;
- hasher.update(buf[0..bytes_read]);
- }
- },
- .sym_link => {
- const link_name = try dir.readLink(hashed_file.fs_path, &buf);
- hasher.update(link_name);
- },
- }
- hasher.final(&hashed_file.hash);
-}
-
-fn isExecutable(file: fs.File) !bool {
- if (builtin.os.tag == .windows) {
- // TODO check the ACL on Windows.
- // Until this is implemented, this could be a false negative on
- // Windows, which is why we do not yet set executable_bit_only above
- // when unpacking the tarball.
- return false;
- } else {
- const stat = try file.stat();
- return (stat.mode & std.os.S.IXUSR) != 0;
- }
-}
diff --git a/src/Sema.zig b/src/Sema.zig
index c1187c0591..a7073a5f36 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -5732,6 +5732,9 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
+ const comp = mod.comp;
+ const gpa = sema.gpa;
const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
const src = pl_node.src();
const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
@@ -5741,7 +5744,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
if (!@import("build_options").have_llvm)
return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{});
- var c_import_buf = std.ArrayList(u8).init(sema.gpa);
+ var c_import_buf = std.ArrayList(u8).init(gpa);
defer c_import_buf.deinit();
var comptime_reason: Block.ComptimeReason = .{ .c_import = .{
@@ -5763,25 +5766,24 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
.runtime_loop = parent_block.runtime_loop,
.runtime_index = parent_block.runtime_index,
};
- defer child_block.instructions.deinit(sema.gpa);
+ defer child_block.instructions.deinit(gpa);
// Ignore the result, all the relevant operations have written to c_import_buf already.
_ = try sema.analyzeBodyBreak(&child_block, body);
- const mod = sema.mod;
- var c_import_res = mod.comp.cImport(c_import_buf.items) catch |err|
+ var c_import_res = comp.cImport(c_import_buf.items) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
- defer c_import_res.deinit(mod.comp.gpa);
+ defer c_import_res.deinit(gpa);
if (c_import_res.errors.errorMessageCount() != 0) {
const msg = msg: {
const msg = try sema.errMsg(&child_block, src, "C import failed", .{});
- errdefer msg.destroy(sema.gpa);
+ errdefer msg.destroy(gpa);
- if (!mod.comp.bin_file.options.link_libc)
+ if (!comp.bin_file.options.link_libc)
try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{});
- const gop = try mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index);
+ const gop = try mod.cimport_errors.getOrPut(gpa, sema.owner_decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = c_import_res.errors;
c_import_res.errors = std.zig.ErrorBundle.empty;
@@ -5790,16 +5792,16 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
};
return sema.failWithOwnedErrorMsg(&child_block, msg);
}
- const c_import_pkg = Package.create(
- sema.gpa,
- null,
- c_import_res.out_zig_path,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => unreachable, // we pass null for root_src_dir_path
- };
+ const c_import_mod = try Package.Module.create(comp.arena.allocator(), .{
+ .root = .{
+ .root_dir = Compilation.Directory.cwd(),
+ .sub_path = std.fs.path.dirname(c_import_res.out_zig_path) orelse "",
+ },
+ .root_src_path = std.fs.path.basename(c_import_res.out_zig_path),
+ .fully_qualified_name = c_import_res.out_zig_path,
+ });
- const result = mod.importPkg(c_import_pkg) catch |err|
+ const result = mod.importPkg(c_import_mod) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
mod.astGenFile(result.file) catch |err|
@@ -13071,13 +13073,13 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand = inst_data.get(sema.code);
const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
- error.ImportOutsidePkgPath => {
- return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
+ error.ImportOutsideModulePath => {
+ return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand});
},
- error.PackageNotFound => {
- const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*);
- defer sema.gpa.free(name);
- return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
+ error.ModuleNotFound => {
+ return sema.fail(block, operand_src, "no module named '{s}' available within module {s}", .{
+ operand, block.getFileScope(mod).mod.fully_qualified_name,
+ });
},
else => {
// TODO: these errors are file system errors; make sure an update() will
@@ -13106,7 +13108,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
- error.ImportOutsidePkgPath => {
+ error.ImportOutsideModulePath => {
return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
},
else => {
@@ -36415,8 +36417,8 @@ fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!Mod
const mod = sema.mod;
const ip = &mod.intern_pool;
- const std_pkg = mod.main_pkg.table.get("std").?;
- const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
+ const std_mod = mod.main_mod.deps.get("std").?;
+ const std_file = (mod.importPkg(std_mod) catch unreachable).file;
const opt_builtin_inst = (try sema.namespaceLookupRef(
block,
src,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 4355ac1191..66b5bfbe04 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -892,21 +892,24 @@ pub const Object = struct {
build_options.semver.patch,
});
- // We fully resolve all paths at this point to avoid lack of source line info in stack
- // traces or lack of debugging information which, if relative paths were used, would
- // be very location dependent.
+ // We fully resolve all paths at this point to avoid lack of
+ // source line info in stack traces or lack of debugging
+ // information which, if relative paths were used, would be
+ // very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const compile_unit_dir = blk: {
- const path = d: {
- const mod = options.module orelse break :d ".";
- break :d mod.root_pkg.root_src_directory.path orelse ".";
- };
- if (std.fs.path.isAbsolute(path)) break :blk path;
- break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was
+ const compile_unit_dir_z = blk: {
+ if (options.module) |mod| {
+ const d = try mod.root_mod.root.joinStringZ(builder.gpa, "");
+ if (std.fs.path.isAbsolute(d)) break :blk d;
+ const abs = std.fs.realpath(d, &buf) catch break :blk d;
+ builder.gpa.free(d);
+ break :blk try builder.gpa.dupeZ(u8, abs);
+ }
+ const cwd = try std.process.getCwd(&buf);
+ break :blk try builder.gpa.dupeZ(u8, cwd);
};
- const compile_unit_dir_z = try builder.gpa.dupeZ(u8, compile_unit_dir);
defer builder.gpa.free(compile_unit_dir_z);
builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit(
@@ -1833,14 +1836,11 @@ pub const Object = struct {
}
const dir_path_z = d: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const dir_path = file.pkg.root_src_directory.path orelse ".";
- const resolved_dir_path = if (std.fs.path.isAbsolute(dir_path))
- dir_path
- else
- std.os.realpath(dir_path, &buffer) catch dir_path; // If realpath fails, fallback to whatever dir_path was
- break :d try std.fs.path.joinZ(gpa, &.{
- resolved_dir_path, std.fs.path.dirname(file.sub_file_path) orelse "",
- });
+ const sub_path = std.fs.path.dirname(file.sub_file_path) orelse "";
+ const dir_path = try file.mod.root.joinStringZ(gpa, sub_path);
+ if (std.fs.path.isAbsolute(dir_path)) break :d dir_path;
+ const abs = std.fs.realpath(dir_path, &buffer) catch break :d dir_path;
+ break :d try std.fs.path.joinZ(gpa, &.{ abs, sub_path });
};
defer gpa.free(dir_path_z);
const sub_file_path_z = try gpa.dupeZ(u8, std.fs.path.basename(file.sub_file_path));
@@ -2828,8 +2828,8 @@ pub const Object = struct {
fn getStackTraceType(o: *Object) Allocator.Error!Type {
const mod = o.module;
- const std_pkg = mod.main_pkg.table.get("std").?;
- const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
+ const std_mod = mod.main_mod.deps.get("std").?;
+ const std_file = (mod.importPkg(std_mod) catch unreachable).file;
const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin");
const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace);
diff --git a/src/crash_report.zig b/src/crash_report.zig
index d4e4b46a53..2b33bd7fa5 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -139,18 +139,22 @@ fn dumpStatusReport() !void {
var crash_heap: [16 * 4096]u8 = undefined;
-fn writeFilePath(file: *Module.File, stream: anytype) !void {
- if (file.pkg.root_src_directory.path) |path| {
- try stream.writeAll(path);
- try stream.writeAll(std.fs.path.sep_str);
+fn writeFilePath(file: *Module.File, writer: anytype) !void {
+ if (file.mod.root.root_dir.path) |path| {
+ try writer.writeAll(path);
+ try writer.writeAll(std.fs.path.sep_str);
}
- try stream.writeAll(file.sub_file_path);
+ if (file.mod.root.sub_path.len > 0) {
+ try writer.writeAll(file.mod.root.sub_path);
+ try writer.writeAll(std.fs.path.sep_str);
+ }
+ try writer.writeAll(file.sub_file_path);
}
-fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void {
- try writeFilePath(decl.getFileScope(mod), stream);
- try stream.writeAll(": ");
- try decl.renderFullyQualifiedDebugName(mod, stream);
+fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, writer: anytype) !void {
+ try writeFilePath(decl.getFileScope(mod), writer);
+ try writer.writeAll(": ");
+ try decl.renderFullyQualifiedDebugName(mod, writer);
}
pub fn compilerPanic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, maybe_ret_addr: ?usize) noreturn {
diff --git a/src/glibc.zig b/src/glibc.zig
index cf12e8ea46..2321063166 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -1074,7 +1074,7 @@ fn buildSharedLib(
.cache_mode = .whole,
.target = comp.getTarget(),
.root_name = lib.name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = .Lib,
.link_mode = .Dynamic,
.thread_pool = comp.thread_pool,
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 0c39469b69..2185d7f890 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -233,7 +233,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
@@ -396,7 +396,7 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 90d2537876..201f4de785 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -202,7 +202,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
diff --git a/src/libunwind.zig b/src/libunwind.zig
index da248c021f..589634763d 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -89,7 +89,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cache_mode = .whole,
.target = target,
.root_name = root_name,
- .main_pkg = null,
+ .main_mod = null,
.output_mode = output_mode,
.thread_pool = comp.thread_pool,
.libc_installation = comp.bin_file.options.libc_installation,
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index cbf5f350e8..931dbacdb9 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1880,7 +1880,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.strtab.insert(self.allocator, module.root_pkg.root_src_path);
+ const name_strp = try self.strtab.insert(self.allocator, module.root_mod.root_src_path);
var compile_unit_dir_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const compile_unit_dir = resolveCompilationDir(module, &compile_unit_dir_buffer);
const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
@@ -1940,9 +1940,17 @@ fn resolveCompilationDir(module: *Module, buffer: *[std.fs.MAX_PATH_BYTES]u8) []
// be very location dependent.
// TODO: the only concern I have with this is WASI as either host or target, should
// we leave the paths as relative then?
- const comp_dir_path = module.root_pkg.root_src_directory.path orelse ".";
- if (std.fs.path.isAbsolute(comp_dir_path)) return comp_dir_path;
- return std.os.realpath(comp_dir_path, buffer) catch comp_dir_path; // If realpath fails, fallback to whatever comp_dir_path was
+ const root_dir_path = module.root_mod.root.root_dir.path orelse ".";
+ const sub_path = module.root_mod.root.sub_path;
+ const realpath = if (std.fs.path.isAbsolute(root_dir_path)) r: {
+ @memcpy(buffer[0..root_dir_path.len], root_dir_path);
+ break :r root_dir_path;
+ } else std.fs.realpath(root_dir_path, buffer) catch return root_dir_path;
+ const len = realpath.len + 1 + sub_path.len;
+ if (buffer.len < len) return root_dir_path;
+ buffer[realpath.len] = '/';
+ @memcpy(buffer[realpath.len + 1 ..][0..sub_path.len], sub_path);
+ return buffer[0..len];
}
fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void {
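
`resolveCompilationDir` now appends the module's `sub_path` to the (possibly realpath-resolved) root directory, writing directly into the caller's buffer. Note that the absolute branch copies `root_dir_path` into the buffer first so the writes at offset `realpath.len` line up, and that an oversized result falls back to the directory alone. The same concatenation isolated as a sketch:

    // Isolated sketch of the manual join above: dir, '/', sub written into a
    // caller-provided buffer, with the same length guard.
    fn joinInBuffer(buffer: []u8, dir: []const u8, sub: []const u8) ?[]const u8 {
        const len = dir.len + 1 + sub.len;
        if (buffer.len < len) return null; // caller falls back to `dir`, as above
        @memcpy(buffer[0..dir.len], dir);
        buffer[dir.len] = '/';
        @memcpy(buffer[dir.len + 1 ..][0..sub.len], sub);
        return buffer[0..len];
    }
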
@@ -2664,7 +2672,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
for (self.di_files.keys()) |dif| {
const dir_path = d: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const dir_path = dif.pkg.root_src_directory.path orelse ".";
+ const dir_path = try dif.mod.root.joinString(arena, dif.mod.root.sub_path);
const abs_dir_path = if (std.fs.path.isAbsolute(dir_path))
dir_path
else
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 24da99aab2..f75047424a 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -929,15 +929,15 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.base.options.module) |module| {
if (self.zig_module_index == null and !self.base.options.use_llvm) {
- const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
+ const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .zig_module = .{
.index = index,
- .path = module.main_pkg.root_src_path,
+ .path = module.main_mod.root_src_path,
} });
self.zig_module_index = index;
const zig_module = self.file(index).?.zig_module;
- const name_off = try self.strtab.insert(gpa, std.fs.path.stem(module.main_pkg.root_src_path));
+ const name_off = try self.strtab.insert(gpa, std.fs.path.stem(module.main_mod.root_src_path));
const symbol_index = try self.addSymbol();
try zig_module.local_symbols.append(gpa, symbol_index);
const symbol_ptr = self.symbol(symbol_index);
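
`std.fs.path.stem` supplies the symbol name here: the root source file's basename with its extension dropped. A tiny runnable check:

    const std = @import("std");

    test "path stem" {
        // The ELF local symbol above is named after the root source file's stem.
        try std.testing.expectEqualStrings("main", std.fs.path.stem("src/main.zig"));
    }
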
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index b519fdda00..3dcef859ae 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -352,9 +352,12 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
// getting the full file path
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const dir = file.pkg.root_src_directory.path orelse try std.os.getcwd(&buf);
- const sub_path = try std.fs.path.join(arena, &.{ dir, file.sub_file_path });
- try self.addPathComponents(sub_path, &a);
+ const full_path = try std.fs.path.join(arena, &.{
+ file.mod.root.root_dir.path orelse try std.os.getcwd(&buf),
+ file.mod.root.sub_path,
+ file.sub_file_path,
+ });
+ try self.addPathComponents(full_path, &a);
// null terminate
try a.append(0);
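
`putFn` now hands all three components to a single `fs.path.join` call instead of joining in two steps. This relies on `join` skipping empty components (my reading, since `file.mod.root.sub_path` may be empty). A runnable check of that assumption:

    const std = @import("std");

    test "join skips empty components" {
        const gpa = std.testing.allocator;
        // Mirrors the putFn change: root dir + module sub_path + file path.
        const full = try std.fs.path.join(gpa, &.{ "/repo", "", "src/main.zig" });
        defer gpa.free(full);
        try std.testing.expectEqualStrings("/repo/src/main.zig", full);
    }
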
diff --git a/src/main.zig b/src/main.zig
index 247669440d..9199fe205b 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -416,7 +416,7 @@ const usage_build_generic =
\\ dep: [[import=]name]
\\ --deps [dep],[dep],... Set dependency names for the root package
\\ dep: [[import=]name]
- \\ --main-pkg-path Set the directory of the root package
+ \\ --main-mod-path Set the directory of the root module
\\ -fPIC Force-enable Position Independent Code
\\ -fno-PIC Force-disable Position Independent Code
\\ -fPIE Force-enable Position Independent Executable
@@ -765,17 +765,11 @@ const Framework = struct {
};
const CliModule = struct {
- mod: *Package,
+ mod: *Package.Module,
/// still in CLI arg format
deps_str: []const u8,
};
-fn cleanupModules(modules: *std.StringArrayHashMap(CliModule)) void {
- var it = modules.iterator();
- while (it.next()) |kv| kv.value_ptr.mod.destroy(modules.allocator);
- modules.deinit();
-}
-
fn buildOutputType(
gpa: Allocator,
arena: Allocator,
@@ -903,7 +897,7 @@ fn buildOutputType(
var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR");
var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR");
- var main_pkg_path: ?[]const u8 = null;
+ var main_mod_path: ?[]const u8 = null;
var clang_preprocessor_mode: Compilation.ClangPreprocessorMode = .no;
var subsystem: ?std.Target.SubSystem = null;
var major_subsystem_version: ?u32 = null;
@@ -950,8 +944,7 @@ fn buildOutputType(
// Contains every module specified via --mod. The dependencies are added
// after argument parsing is completed. We use a StringArrayHashMap to make
// error output consistent.
- var modules = std.StringArrayHashMap(CliModule).init(gpa);
- defer cleanupModules(&modules);
+ var modules = std.StringArrayHashMap(CliModule).init(arena);
// The dependency string for the root package
var root_deps_str: ?[]const u8 = null;
@@ -1023,33 +1016,36 @@ fn buildOutputType(
for ([_][]const u8{ "std", "root", "builtin" }) |name| {
if (mem.eql(u8, mod_name, name)) {
- fatal("unable to add module '{s}' -> '{s}': conflicts with builtin module", .{ mod_name, root_src });
+ fatal("unable to add module '{s}' -> '{s}': conflicts with builtin module", .{
+ mod_name, root_src,
+ });
}
}
- var mod_it = modules.iterator();
- while (mod_it.next()) |kv| {
- if (std.mem.eql(u8, mod_name, kv.key_ptr.*)) {
- fatal("unable to add module '{s}' -> '{s}': already exists as '{s}'", .{ mod_name, root_src, kv.value_ptr.mod.root_src_path });
- }
+ if (modules.get(mod_name)) |value| {
+ fatal("unable to add module '{s}' -> '{s}': already exists as '{s}'", .{
+ mod_name, root_src, value.mod.root_src_path,
+ });
}
- try modules.ensureUnusedCapacity(1);
- modules.put(mod_name, .{
- .mod = try Package.create(
- gpa,
- fs.path.dirname(root_src),
- fs.path.basename(root_src),
- ),
+ try modules.put(mod_name, .{
+ .mod = try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = Cache.Directory.cwd(),
+ .sub_path = fs.path.dirname(root_src) orelse "",
+ },
+ .root_src_path = fs.path.basename(root_src),
+ .fully_qualified_name = mod_name,
+ }),
.deps_str = deps_str,
- }) catch unreachable;
+ });
} else if (mem.eql(u8, arg, "--deps")) {
if (root_deps_str != null) {
fatal("only one --deps argument is allowed", .{});
}
root_deps_str = args_iter.nextOrFatal();
- } else if (mem.eql(u8, arg, "--main-pkg-path")) {
- main_pkg_path = args_iter.nextOrFatal();
+ } else if (mem.eql(u8, arg, "--main-mod-path")) {
+ main_mod_path = args_iter.nextOrFatal();
} else if (mem.eql(u8, arg, "-cflags")) {
extra_cflags.shrinkRetainingCapacity(0);
while (true) {
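
Each `--mod` argument now materializes an arena-allocated `Package.Module` immediately, which is why the old `cleanupModules` pass disappears. A sketch of wiring two such modules together, using only the option fields this hunk sets; paths and names are illustrative:

    // Everything is arena-allocated, so there is no destroy step.
    const foo = try Package.Module.create(arena, .{
        .root = .{ .root_dir = Cache.Directory.cwd(), .sub_path = "libs/foo" },
        .root_src_path = "foo.zig",
        .fully_qualified_name = "foo",
    });
    const bar = try Package.Module.create(arena, .{
        .root = .{ .root_dir = Cache.Directory.cwd(), .sub_path = "libs/bar" },
        .root_src_path = "bar.zig",
        .fully_qualified_name = "bar",
    });
    try bar.deps.put(arena, "foo", foo); // bar may now @import("foo")
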
@@ -2461,19 +2457,26 @@ fn buildOutputType(
var deps_it = ModuleDepIterator.init(deps_str);
while (deps_it.next()) |dep| {
if (dep.expose.len == 0) {
- fatal("module '{s}' depends on '{s}' with a blank name", .{ kv.key_ptr.*, dep.name });
+ fatal("module '{s}' depends on '{s}' with a blank name", .{
+ kv.key_ptr.*, dep.name,
+ });
}
for ([_][]const u8{ "std", "root", "builtin" }) |name| {
if (mem.eql(u8, dep.expose, name)) {
- fatal("unable to add module '{s}' under name '{s}': conflicts with builtin module", .{ dep.name, dep.expose });
+ fatal("unable to add module '{s}' under name '{s}': conflicts with builtin module", .{
+ dep.name, dep.expose,
+ });
}
}
- const dep_mod = modules.get(dep.name) orelse
- fatal("module '{s}' depends on module '{s}' which does not exist", .{ kv.key_ptr.*, dep.name });
+ const dep_mod = modules.get(dep.name) orelse {
+ fatal("module '{s}' depends on module '{s}' which does not exist", .{
+ kv.key_ptr.*, dep.name,
+ });
+ };
- try kv.value_ptr.mod.add(gpa, dep.expose, dep_mod.mod);
+ try kv.value_ptr.mod.deps.put(arena, dep.expose, dep_mod.mod);
}
}
}
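
`ModuleDepIterator` itself is unchanged by this diff; based on the `[[import=]name]` grammar in the usage text above, each entry splits on an optional `=`. A stand-in sketch of that per-entry split (not the compiler's actual implementation; assumes `std` is in scope as in main.zig):

    fn parseDep(dep: []const u8) struct { expose: []const u8, name: []const u8 } {
        if (std.mem.indexOfScalar(u8, dep, '=')) |i| {
            // "import=name": expose `name` under the import name on the left.
            return .{ .expose = dep[0..i], .name = dep[i + 1 ..] };
        }
        // Bare "name": the import name defaults to the module name itself.
        return .{ .expose = dep, .name = dep };
    }
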
@@ -3229,31 +3232,35 @@ fn buildOutputType(
};
defer emit_implib_resolved.deinit();
- const main_pkg: ?*Package = if (root_src_file) |unresolved_src_path| blk: {
+ const main_mod: ?*Package.Module = if (root_src_file) |unresolved_src_path| blk: {
const src_path = try introspect.resolvePath(arena, unresolved_src_path);
- if (main_pkg_path) |unresolved_main_pkg_path| {
- const p = try introspect.resolvePath(arena, unresolved_main_pkg_path);
- if (p.len == 0) {
- break :blk try Package.create(gpa, null, src_path);
- } else {
- const rel_src_path = try fs.path.relative(arena, p, src_path);
- break :blk try Package.create(gpa, p, rel_src_path);
- }
+ if (main_mod_path) |unresolved_main_mod_path| {
+ const p = try introspect.resolvePath(arena, unresolved_main_mod_path);
+ break :blk try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = Cache.Directory.cwd(),
+ .sub_path = p,
+ },
+ .root_src_path = if (p.len == 0)
+ src_path
+ else
+ try fs.path.relative(arena, p, src_path),
+ .fully_qualified_name = "root",
+ });
} else {
- const root_src_dir_path = fs.path.dirname(src_path);
- break :blk Package.create(gpa, root_src_dir_path, fs.path.basename(src_path)) catch |err| {
- if (root_src_dir_path) |p| {
- fatal("unable to open '{s}': {s}", .{ p, @errorName(err) });
- } else {
- return err;
- }
- };
+ break :blk try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = Cache.Directory.cwd(),
+ .sub_path = fs.path.dirname(src_path) orelse "",
+ },
+ .root_src_path = fs.path.basename(src_path),
+ .fully_qualified_name = "root",
+ });
}
} else null;
- defer if (main_pkg) |p| p.destroy(gpa);
// Transfer packages added with --deps to the root package
- if (main_pkg) |mod| {
+ if (main_mod) |mod| {
var it = ModuleDepIterator.init(root_deps_str orelse "");
while (it.next()) |dep| {
if (dep.expose.len == 0) {
@@ -3269,7 +3276,7 @@ fn buildOutputType(
const dep_mod = modules.get(dep.name) orelse
fatal("root module depends on module '{s}' which does not exist", .{dep.name});
- try mod.add(gpa, dep.expose, dep_mod.mod);
+ try mod.deps.put(arena, dep.expose, dep_mod.mod);
}
}
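
The `--main-mod-path` branches further up differ only in where the module root lands relative to the source file. A sketch of the two layouts for a source file at `src/main.zig`, assuming the anonymous `.root` literals are `Package.Path` values (as the later `Package.Path.cwd()` calls suggest) and that a `--main-mod-path` naming the project root resolves to an empty `sub_path`:

    const with_flag: Package.Path = .{
        .root_dir = Cache.Directory.cwd(),
        .sub_path = "", // --main-mod-path pointing at the project root
    }; // root_src_path stays "src/main.zig"
    const without_flag: Package.Path = .{
        .root_dir = Cache.Directory.cwd(),
        .sub_path = "src", // dirname of the source file
    }; // root_src_path = "main.zig"
    _ = with_flag;
    _ = without_flag;
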
@@ -3310,17 +3317,18 @@ fn buildOutputType(
if (arg_mode == .run) {
break :l global_cache_directory;
}
- if (main_pkg) |pkg| {
+ if (main_mod != null) {
// search upwards from cwd until we find directory with build.zig
const cwd_path = try process.getCwdAlloc(arena);
- const build_zig = "build.zig";
const zig_cache = "zig-cache";
var dirname: []const u8 = cwd_path;
while (true) {
- const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig });
+ const joined_path = try fs.path.join(arena, &.{
+ dirname, Package.build_zig_basename,
+ });
if (fs.cwd().access(joined_path, .{})) |_| {
- const cache_dir_path = try fs.path.join(arena, &[_][]const u8{ dirname, zig_cache });
- const dir = try pkg.root_src_directory.handle.makeOpenPath(cache_dir_path, .{});
+ const cache_dir_path = try fs.path.join(arena, &.{ dirname, zig_cache });
+ const dir = try fs.cwd().makeOpenPath(cache_dir_path, .{});
cleanup_local_cache_dir = dir;
break :l .{ .handle = dir, .path = cache_dir_path };
} else |err| switch (err) {
@@ -3389,7 +3397,7 @@ fn buildOutputType(
.dynamic_linker = target_info.dynamic_linker.get(),
.sysroot = sysroot,
.output_mode = output_mode,
- .main_pkg = main_pkg,
+ .main_mod = main_mod,
.emit_bin = emit_bin_loc,
.emit_h = emit_h_resolved.data,
.emit_asm = emit_asm_resolved.data,
@@ -4613,11 +4621,14 @@ pub const usage_build =
\\ --global-cache-dir [path] Override path to global Zig cache directory
\\ --zig-lib-dir [arg] Override path to Zig lib directory
\\ --build-runner [file] Override path to build runner
+ \\ --fetch Exit after fetching dependency tree
\\ -h, --help Print this help and exit
\\
;
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+ const work_around_btrfs_bug = builtin.os.tag == .linux and
+ std.process.hasEnvVarConstant("ZIG_BTRFS_WORKAROUND");
var color: Color = .auto;
// We want to release all the locks before executing the child process, so we make a nice
@@ -4633,6 +4644,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
var child_argv = std.ArrayList([]const u8).init(arena);
var reference_trace: ?u32 = null;
var debug_compile_errors = false;
+ var fetch_only = false;
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
@@ -4682,6 +4694,8 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
} else if (mem.eql(u8, arg, "-freference-trace")) {
try child_argv.append(arg);
reference_trace = 256;
+ } else if (mem.eql(u8, arg, "--fetch")) {
+ fetch_only = true;
} else if (mem.startsWith(u8, arg, "-freference-trace=")) {
try child_argv.append(arg);
const num = arg["-freference-trace=".len..];
@@ -4714,8 +4728,8 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
defer if (cleanup_build_dir) |*dir| dir.close();
const cwd_path = try process.getCwdAlloc(arena);
- const build_zig_basename = if (build_file) |bf| fs.path.basename(bf) else "build.zig";
- const build_directory: Compilation.Directory = blk: {
+ const build_zig_basename = if (build_file) |bf| fs.path.basename(bf) else Package.build_zig_basename;
+ const build_root: Compilation.Directory = blk: {
if (build_file) |bf| {
if (fs.path.dirname(bf)) |dirname| {
const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
@@ -4751,7 +4765,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
}
}
};
- child_argv.items[argv_index_build_file] = build_directory.path orelse cwd_path;
+ child_argv.items[argv_index_build_file] = build_root.path orelse cwd_path;
var global_cache_directory: Compilation.Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
@@ -4771,9 +4785,9 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
.path = local_cache_dir_path,
};
}
- const cache_dir_path = try build_directory.join(arena, &[_][]const u8{"zig-cache"});
+ const cache_dir_path = try build_root.join(arena, &[_][]const u8{"zig-cache"});
break :l .{
- .handle = try build_directory.handle.makeOpenPath("zig-cache", .{}),
+ .handle = try build_root.handle.makeOpenPath("zig-cache", .{}),
.path = cache_dir_path,
};
};
@@ -4799,97 +4813,150 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
try thread_pool.init(.{ .allocator = gpa });
defer thread_pool.deinit();
- var cleanup_build_runner_dir: ?fs.Dir = null;
- defer if (cleanup_build_runner_dir) |*dir| dir.close();
-
- var main_pkg: Package = if (override_build_runner) |build_runner_path|
+ var main_mod: Package.Module = if (override_build_runner) |build_runner_path|
.{
- .root_src_directory = blk: {
- if (std.fs.path.dirname(build_runner_path)) |dirname| {
- const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
- fatal("unable to open directory to build runner from argument 'build-runner', '{s}': {s}", .{ dirname, @errorName(err) });
- };
- cleanup_build_runner_dir = dir;
- break :blk .{ .path = dirname, .handle = dir };
- }
-
- break :blk .{ .path = null, .handle = fs.cwd() };
+ .root = .{
+ .root_dir = Cache.Directory.cwd(),
+ .sub_path = fs.path.dirname(build_runner_path) orelse "",
},
- .root_src_path = std.fs.path.basename(build_runner_path),
+ .root_src_path = fs.path.basename(build_runner_path),
+ .fully_qualified_name = "root",
}
else
.{
- .root_src_directory = zig_lib_directory,
+ .root = .{ .root_dir = zig_lib_directory },
.root_src_path = "build_runner.zig",
+ .fully_qualified_name = "root",
};
- var build_pkg: Package = .{
- .root_src_directory = build_directory,
+ var build_mod: Package.Module = .{
+ .root = .{ .root_dir = build_root },
.root_src_path = build_zig_basename,
+ .fully_qualified_name = "root.@build",
};
if (build_options.only_core_functionality) {
- const deps_pkg = try Package.createFilePkg(gpa, local_cache_directory, "dependencies.zig",
- \\pub const packages = struct {};
- \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{};
- \\
- );
- try main_pkg.add(gpa, "@dependencies", deps_pkg);
+ try createEmptyDependenciesModule(arena, &main_mod, local_cache_directory);
} else {
var http_client: std.http.Client = .{ .allocator = gpa };
defer http_client.deinit();
- // Here we provide an import to the build runner that allows using reflection to find
- // all of the dependencies. Without this, there would be no way to use `@import` to
- // access dependencies by name, since `@import` requires string literals.
- var dependencies_source = std.ArrayList(u8).init(gpa);
- defer dependencies_source.deinit();
-
- var all_modules: Package.AllModules = .{};
- defer all_modules.deinit(gpa);
-
- var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
-
var progress: std.Progress = .{ .dont_print_on_dumb = true };
const root_prog_node = progress.start("Fetch Packages", 0);
defer root_prog_node.end();
- // Here we borrow main package's table and will replace it with a fresh
- // one after this process completes.
- const fetch_result = build_pkg.fetchAndAddDependencies(
- &main_pkg,
- arena,
- &thread_pool,
- &http_client,
- build_directory,
- global_cache_directory,
- local_cache_directory,
- &dependencies_source,
- &wip_errors,
- &all_modules,
- root_prog_node,
- null,
+ var job_queue: Package.Fetch.JobQueue = .{
+ .http_client = &http_client,
+ .thread_pool = &thread_pool,
+ .global_cache = global_cache_directory,
+ .recursive = true,
+ .work_around_btrfs_bug = work_around_btrfs_bug,
+ };
+ defer job_queue.deinit();
+
+ try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1);
+ try job_queue.table.ensureUnusedCapacity(gpa, 1);
+
+ var fetch: Package.Fetch = .{
+ .arena = std.heap.ArenaAllocator.init(gpa),
+ .location = .{ .relative_path = build_mod.root },
+ .location_tok = 0,
+ .hash_tok = 0,
+ .parent_package_root = build_mod.root,
+ .parent_manifest_ast = null,
+ .prog_node = root_prog_node,
+ .job_queue = &job_queue,
+ .omit_missing_hash_error = true,
+ .allow_missing_paths_field = false,
+
+ .package_root = undefined,
+ .error_bundle = undefined,
+ .manifest = null,
+ .manifest_ast = undefined,
+ .actual_hash = undefined,
+ .has_build_zig = true,
+ .oom_flag = false,
+
+ .module = &build_mod,
+ };
+ job_queue.all_fetches.appendAssumeCapacity(&fetch);
+
+ job_queue.table.putAssumeCapacityNoClobber(
+ Package.Fetch.relativePathDigest(build_mod.root, global_cache_directory),
+ &fetch,
);
- if (wip_errors.root_list.items.len > 0) {
- var errors = try wip_errors.toOwnedBundle("");
- defer errors.deinit(gpa);
+
+ job_queue.wait_group.start();
+ try job_queue.thread_pool.spawn(Package.Fetch.workerRun, .{ &fetch, "root" });
+ job_queue.wait_group.wait();
+
+ try job_queue.consolidateErrors();
+
+ if (fetch.error_bundle.root_list.items.len > 0) {
+ var errors = try fetch.error_bundle.toOwnedBundle("");
errors.renderToStdErr(renderOptions(color));
process.exit(1);
}
- try fetch_result;
- const deps_pkg = try Package.createFilePkg(
- gpa,
+ if (fetch_only) return cleanExit();
+
+ var source_buf = std.ArrayList(u8).init(gpa);
+ defer source_buf.deinit();
+ try job_queue.createDependenciesSource(&source_buf);
+ const deps_mod = try createDependenciesModule(
+ arena,
+ source_buf.items,
+ &main_mod,
local_cache_directory,
- "dependencies.zig",
- dependencies_source.items,
);
- mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table);
- try main_pkg.add(gpa, "@dependencies", deps_pkg);
+ {
+ // We need a Module for each package's build.zig.
+ const hashes = job_queue.table.keys();
+ const fetches = job_queue.table.values();
+ try deps_mod.deps.ensureUnusedCapacity(arena, @intCast(hashes.len));
+ for (hashes, fetches) |hash, f| {
+ if (f == &fetch) {
+ // The first one is a dummy package for the current project.
+ continue;
+ }
+ if (!f.has_build_zig)
+ continue;
+ const m = try Package.Module.create(arena, .{
+ .root = try f.package_root.clone(arena),
+ .root_src_path = Package.build_zig_basename,
+ .fully_qualified_name = try std.fmt.allocPrint(
+ arena,
+ "root.@dependencies.{s}",
+ .{&hash},
+ ),
+ });
+ const hash_cloned = try arena.dupe(u8, &hash);
+ deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m);
+ f.module = m;
+ }
+
+ // Each build.zig module needs access to each of its
+ // dependencies' build.zig modules by name.
+ for (fetches) |f| {
+ const mod = f.module orelse continue;
+ const man = f.manifest orelse continue;
+ const dep_names = man.dependencies.keys();
+ try mod.deps.ensureUnusedCapacity(arena, @intCast(dep_names.len));
+ for (dep_names, man.dependencies.values()) |name, dep| {
+ const dep_digest = Package.Fetch.depDigest(
+ f.package_root,
+ global_cache_directory,
+ dep,
+ ) orelse continue;
+ const dep_mod = job_queue.table.get(dep_digest).?.module orelse continue;
+ const name_cloned = try arena.dupe(u8, name);
+ mod.deps.putAssumeCapacityNoClobber(name_cloned, dep_mod);
+ }
+ }
+ }
}
- try main_pkg.add(gpa, "@build", &build_pkg);
+
+ try main_mod.deps.put(arena, "@build", &build_mod);
const comp = Compilation.create(gpa, .{
.zig_lib_directory = zig_lib_directory,
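
The fetch pipeline above follows a start/spawn/wait discipline: the caller marks one job pending, the worker (and, recursively, any workers it spawns) signals completion, and `wait` blocks until the count drains. Whether `workerRun` itself calls `finish` is my inference from the start/wait pairing here. A runnable, std-only reduction of the pattern, with a stand-in job body:

    const std = @import("std");

    fn runAndFinish(wg: *std.Thread.WaitGroup, x: *u32) void {
        defer wg.finish(); // worker signals completion
        x.* += 1;
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();

        var pool: std.Thread.Pool = undefined;
        try pool.init(.{ .allocator = gpa_state.allocator() });
        defer pool.deinit();

        var wg: std.Thread.WaitGroup = .{};
        var counter: u32 = 0;
        wg.start(); // one pending job, mirroring job_queue.wait_group.start()
        try pool.spawn(runAndFinish, .{ &wg, &counter });
        wg.wait(); // returns once every started job has finished
        std.debug.print("jobs done: {d}\n", .{counter});
    }
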
@@ -4901,7 +4968,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
.is_native_abi = cross_target.isNativeAbi(),
.dynamic_linker = target_info.dynamic_linker.get(),
.output_mode = .Exe,
- .main_pkg = &main_pkg,
+ .main_mod = &main_mod,
.emit_bin = emit_bin,
.emit_h = null,
.optimize_mode = .Debug,
@@ -5115,12 +5182,15 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
.tree = tree,
.tree_loaded = true,
.zir = undefined,
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
- file.pkg = try Package.create(gpa, null, file.sub_file_path);
- defer file.pkg.destroy(gpa);
+ file.mod = try Package.Module.create(arena, .{
+ .root = Package.Path.cwd(),
+ .root_src_path = file.sub_file_path,
+ .fully_qualified_name = "root",
+ });
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
@@ -5321,12 +5391,15 @@ fn fmtPathFile(
.tree = tree,
.tree_loaded = true,
.zir = undefined,
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
- file.pkg = try Package.create(gpa, null, file.sub_file_path);
- defer file.pkg.destroy(gpa);
+ file.mod = try Package.Module.create(fmt.arena, .{
+ .root = Package.Path.cwd(),
+ .root_src_path = file.sub_file_path,
+ .fully_qualified_name = "root",
+ });
if (stat.size > max_src_size)
return error.FileTooBig;
@@ -5387,7 +5460,7 @@ pub fn putAstErrorsIntoBundle(
tree: Ast,
path: []const u8,
wip_errors: *std.zig.ErrorBundle.Wip,
-) !void {
+) Allocator.Error!void {
var file: Module.File = .{
.status = .never_loaded,
.source_loaded = true,
@@ -5402,12 +5475,16 @@ pub fn putAstErrorsIntoBundle(
.tree = tree,
.tree_loaded = true,
.zir = undefined,
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
- file.pkg = try Package.create(gpa, null, path);
- defer file.pkg.destroy(gpa);
+ file.mod = try Package.Module.create(gpa, .{
+ .root = Package.Path.cwd(),
+ .root_src_path = file.sub_file_path,
+ .fully_qualified_name = "root",
+ });
+ defer gpa.destroy(file.mod);
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
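
Note the two ownership styles for these throwaway `File` modules: the fmt paths allocate from an arena and never free, while this one allocates from `gpa` and destroys the struct by hand. Side by side, with `opts` standing in for the options literal (hypothetical):

    const arena_mod = try Package.Module.create(arena, opts); // freed with the arena
    const gpa_mod = try Package.Module.create(gpa, opts);
    defer gpa.destroy(gpa_mod); // only the struct itself; matches the defer above
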
@@ -5933,7 +6010,7 @@ pub fn cmdAstCheck(
.stat = undefined,
.tree = undefined,
.zir = undefined,
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
if (zig_source_file) |file_name| {
@@ -5971,8 +6048,11 @@ pub fn cmdAstCheck(
file.stat.size = source.len;
}
- file.pkg = try Package.create(gpa, null, file.sub_file_path);
- defer file.pkg.destroy(gpa);
+ file.mod = try Package.Module.create(arena, .{
+ .root = Package.Path.cwd(),
+ .root_src_path = file.sub_file_path,
+ .fully_qualified_name = "root",
+ });
file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
@@ -6067,7 +6147,7 @@ pub fn cmdDumpZir(
.stat = undefined,
.tree = undefined,
.zir = try Module.loadZirCache(gpa, f),
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
@@ -6136,12 +6216,15 @@ pub fn cmdChangelist(
},
.tree = undefined,
.zir = undefined,
- .pkg = undefined,
+ .mod = undefined,
.root_decl = .none,
};
- file.pkg = try Package.create(gpa, null, file.sub_file_path);
- defer file.pkg.destroy(gpa);
+ file.mod = try Package.Module.create(arena, .{
+ .root = Package.Path.cwd(),
+ .root_src_path = file.sub_file_path,
+ .fully_qualified_name = "root",
+ });
const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
const amt = try f.readAll(source);
@@ -6623,7 +6706,9 @@ fn cmdFetch(
args: []const []const u8,
) !void {
const color: Color = .auto;
- var opt_url: ?[]const u8 = null;
+ const work_around_btrfs_bug = builtin.os.tag == .linux and
+ std.process.hasEnvVarConstant("ZIG_BTRFS_WORKAROUND");
+ var opt_path_or_url: ?[]const u8 = null;
var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
{
@@ -6643,15 +6728,15 @@ fn cmdFetch(
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
- } else if (opt_url != null) {
+ } else if (opt_path_or_url != null) {
fatal("unexpected extra parameter: '{s}'", .{arg});
} else {
- opt_url = arg;
+ opt_path_or_url = arg;
}
}
}
- const url = opt_url orelse fatal("missing url or path parameter", .{});
+ const path_or_url = opt_path_or_url orelse fatal("missing url or path parameter", .{});
var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{ .allocator = gpa });
@@ -6664,19 +6749,6 @@ fn cmdFetch(
const root_prog_node = progress.start("Fetch", 0);
defer root_prog_node.end();
- var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
-
- var report: Package.Report = .{
- .ast = null,
- .directory = .{
- .handle = fs.cwd(),
- .path = null,
- },
- .error_bundle = &wip_errors,
- };
-
var global_cache_directory: Compilation.Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
@@ -6686,56 +6758,51 @@ fn cmdFetch(
};
defer global_cache_directory.handle.close();
- var readable_resource: Package.ReadableResource = rr: {
- if (fs.cwd().openIterableDir(url, .{})) |dir| {
- break :rr .{
- .path = try gpa.dupe(u8, url),
- .resource = .{ .dir = dir },
- };
- } else |dir_err| {
- const file_err = if (dir_err == error.NotDir) e: {
- if (fs.cwd().openFile(url, .{})) |f| {
- break :rr .{
- .path = try gpa.dupe(u8, url),
- .resource = .{ .file = f },
- };
- } else |err| break :e err;
- } else dir_err;
-
- const uri = std.Uri.parse(url) catch |uri_err| {
- fatal("'{s}' could not be recognized as a file path ({s}) or an URL ({s})", .{
- url, @errorName(file_err), @errorName(uri_err),
- });
- };
- const fetch_location = try Package.FetchLocation.initUri(uri, 0, report);
- const cwd: Cache.Directory = .{
- .handle = fs.cwd(),
- .path = null,
- };
- break :rr try fetch_location.fetch(gpa, cwd, &http_client, 0, report);
- }
+ var job_queue: Package.Fetch.JobQueue = .{
+ .http_client = &http_client,
+ .thread_pool = &thread_pool,
+ .global_cache = global_cache_directory,
+ .recursive = false,
+ .work_around_btrfs_bug = work_around_btrfs_bug,
};
- defer readable_resource.deinit(gpa);
+ defer job_queue.deinit();
+
+ var fetch: Package.Fetch = .{
+ .arena = std.heap.ArenaAllocator.init(gpa),
+ .location = .{ .path_or_url = path_or_url },
+ .location_tok = 0,
+ .hash_tok = 0,
+ .parent_package_root = undefined,
+ .parent_manifest_ast = null,
+ .prog_node = root_prog_node,
+ .job_queue = &job_queue,
+ .omit_missing_hash_error = true,
+ .allow_missing_paths_field = false,
+
+ .package_root = undefined,
+ .error_bundle = undefined,
+ .manifest = null,
+ .manifest_ast = undefined,
+ .actual_hash = undefined,
+ .has_build_zig = false,
+ .oom_flag = false,
+
+ .module = null,
+ };
+ defer fetch.deinit();
- var package_location = readable_resource.unpack(
- gpa,
- &thread_pool,
- global_cache_directory,
- 0,
- report,
- root_prog_node,
- ) catch |err| {
- if (wip_errors.root_list.items.len > 0) {
- var errors = try wip_errors.toOwnedBundle("");
- defer errors.deinit(gpa);
- errors.renderToStdErr(renderOptions(color));
- process.exit(1);
- }
- fatal("unable to unpack '{s}': {s}", .{ url, @errorName(err) });
+ fetch.run() catch |err| switch (err) {
+ error.OutOfMemory => fatal("out of memory", .{}),
+ error.FetchFailed => {}, // error bundle checked below
};
- defer package_location.deinit(gpa);
- const hex_digest = Package.Manifest.hexDigest(package_location.hash);
+ if (fetch.error_bundle.root_list.items.len > 0) {
+ var errors = try fetch.error_bundle.toOwnedBundle("");
+ errors.renderToStdErr(renderOptions(color));
+ process.exit(1);
+ }
+
+ const hex_digest = Package.Manifest.hexDigest(fetch.actual_hash);
progress.done = true;
progress.refresh();
@@ -6744,3 +6811,56 @@ fn cmdFetch(
return cleanExit();
}
+
+fn createEmptyDependenciesModule(
+ arena: Allocator,
+ main_mod: *Package.Module,
+ local_cache_directory: Cache.Directory,
+) !void {
+ var source = std.ArrayList(u8).init(arena);
+ try Package.Fetch.JobQueue.createEmptyDependenciesSource(&source);
+ _ = try createDependenciesModule(arena, source.items, main_mod, local_cache_directory);
+}
+
+/// Creates the dependencies.zig file and corresponding `Package.Module` for the
+/// build runner to obtain via `@import("@dependencies")`.
+fn createDependenciesModule(
+ arena: Allocator,
+ source: []const u8,
+ main_mod: *Package.Module,
+ local_cache_directory: Cache.Directory,
+) !*Package.Module {
+ // Atomically create the file in a directory named after the hash of its contents.
+ const basename = "dependencies.zig";
+ const rand_int = std.crypto.random.int(u64);
+ const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++
+ Package.Manifest.hex64(rand_int);
+ {
+ var tmp_dir = try local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
+ defer tmp_dir.close();
+ try tmp_dir.writeFile(basename, source);
+ }
+
+ var hh: Cache.HashHelper = .{};
+ hh.addBytes(build_options.version);
+ hh.addBytes(source);
+ const hex_digest = hh.final();
+
+ const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest);
+ try Package.Fetch.renameTmpIntoCache(
+ local_cache_directory.handle,
+ tmp_dir_sub_path,
+ o_dir_sub_path,
+ );
+
+ const deps_mod = try Package.Module.create(arena, .{
+ .root = .{
+ .root_dir = local_cache_directory,
+ .sub_path = o_dir_sub_path,
+ },
+ .root_src_path = basename,
+ .fully_qualified_name = "root.@dependencies",
+ });
+ try main_mod.deps.put(arena, "@dependencies", deps_mod);
+ return deps_mod;
+}
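
`createDependenciesModule` is a miniature of the cache discipline used throughout the fetch rework: write into a uniquely named tmp directory, name the final directory after a hash of the compiler version plus the file contents, then rename atomically so concurrent builds converge on a single entry. A runnable, std-only reduction of the naming step; Blake2 here is a stand-in, since the compiler hashes via `Cache.HashHelper`:

    const std = @import("std");

    pub fn main() !void {
        const source = "pub const packages = struct {};\n";
        var hasher = std.crypto.hash.blake2.Blake2b128.init(.{});
        hasher.update("0.12.0-dev"); // stand-in for build_options.version
        hasher.update(source);
        var digest: [16]u8 = undefined;
        hasher.final(&digest);
        const hex = std.fmt.bytesToHex(digest, .lower);
        // Identical (version, contents) pairs always map to the same directory.
        std.debug.print("o/{s}/dependencies.zig\n", .{&hex});
    }
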
diff --git a/src/musl.zig b/src/musl.zig
index c66f464f9d..bbb3c145bb 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -206,7 +206,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
.zig_lib_directory = comp.zig_lib_directory,
.target = target,
.root_name = "c",
- .main_pkg = null,
+ .main_mod = null,
.output_mode = .Lib,
.link_mode = .Dynamic,
.thread_pool = comp.thread_pool,