| author | Andrew Kelley <andrew@ziglang.org> | 2025-12-08 13:39:09 -0800 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2025-12-23 22:15:08 -0800 |
| commit | f53248a40936ebc9aaf75ddbd16e67ebec05ab84 (patch) | |
| tree | af6a1a4fa4d3ff09dae241922a8f7c37cde43681 | |
| parent | 916998315967f73c91e682e9ea05dd3232818654 (diff) | |
| download | zig-f53248a40936ebc9aaf75ddbd16e67ebec05ab84.tar.gz / zig-f53248a40936ebc9aaf75ddbd16e67ebec05ab84.zip | |
update all std.fs.cwd() to std.Io.Dir.cwd()
72 files changed, 398 insertions, 377 deletions
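The change is mechanical across all 72 files. A minimal sketch of the before/after pattern follows; the helper and the file name are hypothetical, and the caller is assumed to already hold an `Io` instance (for example one obtained from `std.Io.Threaded`, as the tools touched in this diff do):

```zig
const std = @import("std");
const Io = std.Io;

/// Hypothetical helper, shown only to illustrate the migration pattern
/// applied throughout this commit.
fn writeNote(io: Io, text: []const u8) !void {
    // Before this commit: const cwd = std.fs.cwd();
    const cwd = Io.Dir.cwd();

    // Dir.createFile now takes the Io instance as its first argument.
    // Before: cwd.createFile("note.txt", .{})
    const file = try cwd.createFile(io, "note.txt", .{});
    defer file.close(io);

    try file.writeAll(text);
}
```

Threading `io` through explicitly is also why related edits ride along in this diff: `child.wait()` becomes `child.wait(io)` in the build system, and code that reached into `dir.fd` now uses `dir.handle`.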
diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 09e4861d13..b3e4d5544d 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -2253,7 +2253,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("path", str); @@ -2267,7 +2267,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); @@ -2313,7 +2313,7 @@ test "addSourceFromBuffer - exhaustive check for carriage return elimination" { var buf: [alphabet.len]u8 = @splat(alphabet[0]); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var source_count: u32 = 0; @@ -2341,7 +2341,7 @@ test "ignore BOM at beginning of file" { const Test = struct { fn run(arena: Allocator, buf: []const u8) !void { var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("file.c", buf); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index f933e3ce52..fec3cea0f8 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -1327,7 +1327,7 @@ fn processSource( const dep_file_name = try d.getDepFileName(source, writer_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) }) else Io.File.stdout(); @@ -1352,7 +1352,7 @@ fn processSource( } const file = if (d.output_name) |some| - d.comp.cwd.createFile(some, .{}) catch |er| + d.comp.cwd.createFile(io, some, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) }) else Io.File.stdout(); @@ -1405,7 +1405,7 @@ fn processSource( defer assembly.deinit(gpa); if (d.only_preprocess_and_compile) { - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); @@ -1419,7 +1419,7 @@ fn processSource( // then assemble to out_file_name var assembly_name_buf: [std.fs.max_name_bytes]u8 = undefined; const 
assembly_out_file_name = try d.getRandomFilename(&assembly_name_buf, ".s"); - const out_file = d.comp.cwd.createFile(assembly_out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, assembly_out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) }); defer out_file.close(io); assembly.writeToFile(out_file) catch |er| @@ -1455,7 +1455,7 @@ fn processSource( }; defer obj.deinit(); - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index 4a89e0d460..fc21ee4d0b 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; @@ -211,7 +212,7 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca const prev_total = p.diagnostics.total; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -425,7 +426,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (p.diagnostics.effectiveKind(diagnostic) == .off) return; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -447,7 +448,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) }, p.pp.expansionSlice(tok_i), true); } -fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !void { +fn formatArgs(p: *Parser, w: *Io.Writer, fmt: []const u8, args: anytype) !void { var i: usize = 0; inline for (std.meta.fields(@TypeOf(args))) |arg_info| { const arg = @field(args, arg_info.name); @@ -476,13 +477,13 @@ fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !vo try w.writeAll(fmt[i..]); } -fn formatTokenId(w: *std.Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { +fn formatTokenId(w: *Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { const i = Diagnostics.templateIndex(w, fmt, "{tok_id}"); try w.writeAll(tok_id.symbol()); return i; } -fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) !usize { +fn formatQualType(p: *Parser, w: *Io.Writer, fmt: []const u8, qt: QualType) !usize { const i = Diagnostics.templateIndex(w, fmt, "{qt}"); try w.writeByte('\''); try qt.print(p.comp, w); @@ -501,7 +502,7 @@ fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) return i; } -fn formatResult(p: *Parser, w: *std.Io.Writer, fmt: []const u8, res: Result) !usize { +fn formatResult(p: *Parser, w: *Io.Writer, fmt: []const u8, res: Result) !usize { const i = Diagnostics.templateIndex(w, fmt, "{value}"); switch (res.val.opt_ref) { .none => try w.writeAll("(none)"), @@ -524,7 +525,7 @@ const Normalized = struct { return .{ .str = str }; } - pub fn format(ctx: Normalized, w: 
*std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Normalized, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{normalized}"); var it: std.unicode.Utf8Iterator = .{ .bytes = ctx.str, @@ -558,7 +559,7 @@ const Codepoint = struct { return .{ .codepoint = codepoint }; } - pub fn format(ctx: Codepoint, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Codepoint, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{codepoint}"); try w.print("{X:0>4}", .{ctx.codepoint}); return i; @@ -572,7 +573,7 @@ const Escaped = struct { return .{ .str = str }; } - pub fn format(ctx: Escaped, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Escaped, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{s}"); try std.zig.stringEscape(ctx.str, w); return i; @@ -1453,7 +1454,7 @@ fn decl(p: *Parser) Error!bool { return true; } -fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *std.Io.Writer.Allocating) !?[]const u8 { +fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *Io.Writer.Allocating) !?[]const u8 { const w = &allocating.writer; const cond = cond_node.get(&p.tree); @@ -1526,7 +1527,7 @@ fn staticAssert(p: *Parser) Error!bool { } else { if (!res.val.toBool(p.comp)) { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| { @@ -9719,7 +9720,7 @@ fn primaryExpr(p: *Parser) Error!?Result { qt = some.qt; } else if (p.func.qt) |func_qt| { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10608,7 +10609,7 @@ test "Node locations" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const file = try comp.addSourceFromBuffer("file.c", diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index d47727cbf0..e8343dc83a 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -3900,7 +3900,7 @@ test "Preserve pragma tokens sometimes" { defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -3967,7 +3967,7 @@ test "destringify" { var arena: std.heap.ArenaAllocator = .init(gpa); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = 
Preprocessor.init(&comp, .default); defer pp.deinit(); @@ -4030,7 +4030,7 @@ test "Include guards" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp, .default); defer pp.deinit(); diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig index c497c5ce82..198d49364a 100644 --- a/lib/compiler/aro/aro/Tokenizer.zig +++ b/lib/compiler/aro/aro/Tokenizer.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Compilation = @import("Compilation.zig"); @@ -2326,7 +2327,7 @@ test "Tokenizer fuzz test" { fn testOne(_: @This(), input_bytes: []const u8) anyerror!void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); @@ -2351,7 +2352,7 @@ test "Tokenizer fuzz test" { fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); if (langopts) |provided| { comp.langopts = provided; diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig index 25a2d1824f..14949ce03b 100644 --- a/lib/compiler/aro/aro/Value.zig +++ b/lib/compiler/aro/aro/Value.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; @@ -80,7 +81,7 @@ test "minUnsignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -119,7 +120,7 @@ test "minSignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -1080,7 +1081,7 @@ const NestedPrint = union(enum) { }, }; -pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printPointer(offset: Value, base: []const u8, comp: *const 
Compilation, w: *Io.Writer) Io.Writer.Error!void { try w.writeByte('&'); try w.writeAll(base); if (!offset.isZero(comp)) { @@ -1089,7 +1090,7 @@ pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w } } -pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint { +pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!?NestedPrint { if (qt.is(comp, .bool)) { try w.writeAll(if (v.isZero(comp)) "false" else "true"); return null; @@ -1116,7 +1117,7 @@ pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer return null; } -pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void { const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp)); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; try w.writeByte('"'); diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig index 66c8add4a3..d1be1dbb21 100644 --- a/lib/compiler/aro/main.zig +++ b/lib/compiler/aro/main.zig @@ -59,7 +59,7 @@ pub fn main() u8 { } }, }; - var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |er| switch (er) { + var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |er| switch (er) { error.OutOfMemory => { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 485e644daa..e48f76a6a6 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -152,7 +152,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void defer threaded.deinit(); const io = threaded.io(); - const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); + const input_file = Io.Dir.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); defer input_file.close(io); const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); @@ -180,7 +180,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const mode = if (out_fmt != .elf or only_keep_debug) Io.File.default_mode else stat.mode; - var output_file = try fs.cwd().createFile(output, .{ .mode = mode }); + var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode }); defer output_file.close(io); var out = output_file.writer(&output_buffer); diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index bbd3d172b4..d3f33ad81a 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -233,7 +233,7 @@ pub fn main() !void { } } - try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); // std.debug.print("trying this code:\n{s}\n", .{rendered.items}); const interestingness = try runCheck(arena, interestingness_argv.items); @@ -274,7 +274,7 @@ pub fn main() !void { fixups.clearRetainingCapacity(); rendered.clearRetainingCapacity(); try tree.render(gpa, &rendered.writer, fixups); - try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = 
root_source_file_path, .data = rendered.written() }); return std.process.cleanExit(); } @@ -398,7 +398,7 @@ fn transformationsToFixups( } fn parse(gpa: Allocator, file_path: []const u8) !Ast { - const source_code = std.fs.cwd().readFileAllocOptions( + const source_code = Io.Dir.cwd().readFileAllocOptions( file_path, gpa, .limited(std.math.maxInt(u32)), diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index bb54289e3e..ae4ece2968 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -2003,7 +2003,7 @@ test "maybeAppendRC" { // Create the file so that it's found. In this scenario, .rc should not get // appended. - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); file.close(io); try options.maybeAppendRC(tmp.dir); try std.testing.expectEqualStrings("foo", options.input_source.filename); diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig index 7dc77e5ee1..3e046a10c1 100644 --- a/lib/compiler/resinator/compile.zig +++ b/lib/compiler/resinator/compile.zig @@ -111,7 +111,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) }); } } - // Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed) + // Re-open the passed in cwd since we want to be able to close it (Io.Dir.cwd() shouldn't be closed) const cwd_dir = options.cwd.openDir(".", .{}) catch |err| { try options.diagnostics.append(.{ .err = .failed_to_open_cwd, @@ -406,7 +406,7 @@ pub const Compiler = struct { // `/test.bin` relative to include paths and instead only treats it as // an absolute path. 
if (std.fs.path.isAbsolute(path)) { - const file = try utils.openFileNotDir(std.fs.cwd(), path, .{}); + const file = try utils.openFileNotDir(Io.Dir.cwd(), path, .{}); errdefer file.close(io); if (self.dependencies) |dependencies| { diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index c726a06cf4..416abc2ab7 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -67,7 +67,7 @@ pub fn main() !void { }, else => |e| return e, }; - try options.maybeAppendRC(std.fs.cwd()); + try options.maybeAppendRC(Io.Dir.cwd()); if (!zig_integration) { // print any warnings/notes @@ -141,7 +141,7 @@ pub fn main() !void { if (!zig_integration) std.debug.unlockStderrWriter(); } - var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd()); + var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var argv: std.ArrayList([]const u8) = .empty; @@ -196,7 +196,7 @@ pub fn main() !void { }; }, .filename => |input_filename| { - break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { + break :full_input Io.Dir.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); std.process.exit(1); }; @@ -212,7 +212,7 @@ pub fn main() !void { try output_file.writeAll(full_input); }, .filename => |output_filename| { - try std.fs.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); + try Io.Dir.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); }, } return; @@ -277,7 +277,7 @@ pub fn main() !void { const output_buffered_stream = res_stream_writer.interface(); compile(gpa, io, final_input, output_buffered_stream, .{ - .cwd = std.fs.cwd(), + .cwd = Io.Dir.cwd(), .diagnostics = &diagnostics, .source_mappings = &mapping_results.mappings, .dependencies = maybe_dependencies, @@ -294,7 +294,7 @@ pub fn main() !void { .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page, }) catch |err| switch (err) { error.ParseError, error.CompileError => { - try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings); + try error_handler.emitDiagnostics(gpa, Io.Dir.cwd(), final_input, &diagnostics, mapping_results.mappings); // Delete the output file on error res_stream.cleanupAfterError(io); std.process.exit(1); @@ -306,12 +306,12 @@ pub fn main() !void { // print any warnings/notes if (!zig_integration) { - diagnostics.renderToStdErr(std.fs.cwd(), final_input, mapping_results.mappings); + diagnostics.renderToStdErr(Io.Dir.cwd(), final_input, mapping_results.mappings); } // write the depfile if (options.depfile_path) |depfile_path| { - var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| { + var depfile = Io.Dir.cwd().createFile(io, depfile_path, .{}) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); std.process.exit(1); }; @@ -440,7 +440,7 @@ const IoStream = struct { // Delete the output file on error file.close(io); // Failing to delete is not really a big deal, so swallow any errors - std.fs.cwd().deleteFile(self.name) catch {}; + Io.Dir.cwd().deleteFile(self.name) catch {}; }, .stdio, .memory, .closed => return, } @@ -457,8 +457,8 @@ const IoStream = struct { switch (source) { .filename => |filename| return .{ .file = 
switch (io) { - .input => try openFileNotDir(std.fs.cwd(), filename, .{}), - .output => try std.fs.cwd().createFile(filename, .{}), + .input => try openFileNotDir(Io.Dir.cwd(), filename, .{}), + .output => try Io.Dir.cwd().createFile(io, filename, .{}), }, }, .stdio => |file| return .{ .stdio = file }, diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index e4efac28cd..87c4da9faa 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -40,7 +40,7 @@ pub fn main() !void { const zig_exe_path = argv.next().?; const global_cache_path = argv.next().?; - var lib_dir = try std.fs.cwd().openDir(zig_lib_directory, .{}); + var lib_dir = try Io.Dir.cwd().openDir(zig_lib_directory, .{}); defer lib_dir.close(io); var listen_port: u16 = 0; diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index 830c70e424..d0a873fd78 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -47,7 +47,7 @@ pub fn main() u8 { }; defer diagnostics.deinit(); - var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |err| switch (err) { + var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |err| switch (err) { error.OutOfMemory => { std.debug.print("ran out of memory initializing C compilation\n", .{}); if (fast_exit) process.exit(1); @@ -226,7 +226,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration const dep_file_name = try d.getDepFileName(source, out_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) }) else Io.File.stdout(); @@ -253,10 +253,10 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration if (d.output_name) |path| blk: { if (std.mem.eql(u8, path, "-")) break :blk; if (std.fs.path.dirname(path)) |dirname| { - std.fs.cwd().makePath(dirname) catch |err| + Io.Dir.cwd().makePath(dirname) catch |err| return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); } - out_file = std.fs.cwd().createFile(path, .{}) catch |err| { + out_file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| { return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); }; close_out_file = true; diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 7bfdbb6449..cc2f70fd2f 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1702,13 +1702,13 @@ pub fn addCheckFile( pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.MakeError || Io.Dir.StatFileError)!void { const io = b.graph.io; if (b.verbose) log.info("truncate {s}", .{dest_path}); - const cwd = fs.cwd(); - var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) { + const cwd = Io.Dir.cwd(); + var src_file = cwd.createFile(io, dest_path, .{}) catch |err| switch (err) { error.FileNotFound => blk: { if (fs.path.dirname(dest_path)) |dirname| { try cwd.makePath(dirname); } - break :blk try cwd.createFile(dest_path, .{}); + break :blk try cwd.createFile(io, dest_path, .{}); }, else => |e| return e, }; @@ -1846,7 +1846,7 @@ pub fn runAllowFail( }; errdefer b.allocator.free(stdout); - const term = try child.wait(); + const term = try child.wait(io); switch (term) { .Exited => |code| { if (code != 0) { @@ -2193,7 +2193,7 @@ fn 
dependencyInner( const build_root: std.Build.Cache.Directory = .{ .path = build_root_string, - .handle = fs.cwd().openDir(build_root_string, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(build_root_string, .{}) catch |err| { std.debug.print("unable to open '{s}': {s}\n", .{ build_root_string, @errorName(err), }); diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 42459c033d..fdcb2ab714 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -508,7 +508,7 @@ pub const Manifest = struct { // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. while (true) { - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -543,7 +543,7 @@ pub const Manifest = struct { return error.CacheCheckFailed; } - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -873,7 +873,7 @@ pub const Manifest = struct { if (man.want_refresh_timestamp) { man.want_refresh_timestamp = false; - var file = man.cache.manifest_dir.createFile("timestamp", .{ + var file = man.cache.manifest_dir.createFile(io, "timestamp", .{ .read = true, .truncate = true, }) catch |err| switch (err) { @@ -1324,7 +1324,7 @@ fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadErro fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { const test_out_file = "test-filetimestamp.tmp"; - var file = try dir.createFile(test_out_file, .{ + var file = try dir.createFile(io, test_out_file, .{ .read = true, .truncate = true, }); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 56ef32e8d8..2ec1c0ef31 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -401,6 +401,9 @@ pub fn evalZigProcess( web_server: ?*Build.WebServer, gpa: Allocator, ) !?Path { + const b = s.owner; + const io = b.graph.io; + // If an error occurs, it's happened in this command: assert(s.result_failed_command == null); s.result_failed_command = try allocPrintCmd(gpa, null, argv); @@ -411,7 +414,7 @@ pub fn evalZigProcess( const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) { error.BrokenPipe => { // Process restart required. - const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; _ = term; @@ -427,7 +430,7 @@ pub fn evalZigProcess( if (s.result_error_msgs.items.len > 0 and result == null) { // Crash detected. 
- const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -439,9 +442,7 @@ pub fn evalZigProcess( return result; } assert(argv.len != 0); - const b = s.owner; const arena = b.allocator; - const io = b.graph.io; try handleChildProcUnsupported(s); try handleVerbose(s.owner, null, argv); @@ -478,7 +479,7 @@ pub fn evalZigProcess( zp.child.stdin.?.close(io); zp.child.stdin = null; - const term = zp.child.wait() catch |err| { + const term = zp.child.wait(io) catch |err| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], err }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -519,7 +520,7 @@ pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.MakePathStatus { const b = s.owner; try handleVerbose(b, null, &.{ "install", "-d", dest_path }); - return std.fs.cwd().makePathStatus(dest_path) catch |err| + return Io.Dir.cwd().makePathStatus(dest_path) catch |err| return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err }); } @@ -895,7 +896,7 @@ pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!voi try addWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = std.fs.path.dirname(path_string) orelse "", }, std.fs.path.basename(path_string)); @@ -920,7 +921,7 @@ pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Alloc try addDirectoryWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = path_string, }); diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index efeedc8b80..560b6ad050 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -3,7 +3,9 @@ //! TODO: generalize the code in std.testing.expectEqualStrings and make this //! CheckFile step produce those helpful diagnostics when there is not a match. 
const CheckFile = @This(); + const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const fs = std.fs; const mem = std.mem; @@ -53,7 +55,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index df2419764d..ea7d9d99ff 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -1,5 +1,7 @@ -const std = @import("std"); const ConfigHeader = @This(); + +const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const Allocator = std.mem.Allocator; const Writer = std.Io.Writer; @@ -205,7 +207,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .autoconf_undef, .autoconf_at => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); @@ -219,7 +221,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cmake => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 9f5665e93a..1416e0e916 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -1,12 +1,13 @@ -const std = @import("std"); +const Options = @This(); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const fs = std.fs; const Step = std.Build.Step; const GeneratedFile = std.Build.GeneratedFile; const LazyPath = std.Build.LazyPath; -const Options = @This(); - pub const base_id: Step.Id = .options; step: Step, @@ -542,11 +543,11 @@ test Options { .cache = .{ .io = io, .gpa = arena.allocator(), - .manifest_dir = std.fs.cwd(), + .manifest_dir = Io.Dir.cwd(), }, .zig_exe = "test", .env_map = std.process.EnvMap.init(arena.allocator()), - .global_cache_root = .{ .path = "test", .handle = std.fs.cwd() }, + .global_cache_root = .{ .path = "test", .handle = Io.Dir.cwd() }, .host = .{ .query = .{}, .result = try std.zig.system.resolveTargetQuery(io, .{}), @@ -557,8 +558,8 @@ test Options { var builder = try std.Build.create( &graph, - .{ .path = "test", .handle = std.fs.cwd() }, - .{ .path = "test", .handle = std.fs.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, &.{}, ); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 7c54c8048e..af6bc20438 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig 
@@ -1023,7 +1023,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null); - const dep_file_dir = std.fs.cwd(); + const dep_file_dir = Io.Dir.cwd(); const dep_file_basename = dep_output_file.generated_file.getPath2(b, step); if (has_side_effects) try man.addDepFile(dep_file_dir, dep_file_basename) diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index ff06ad3ff3..f7ac47961e 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -122,7 +122,7 @@ const Os = switch (builtin.os.tag) { }) catch return error.NameTooLong; const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); + try posix.name_to_handle_at(path.root_dir.handle.handle, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); const stack_lfh: FileHandle = .{ .handle = stack_ptr }; return stack_lfh.clone(gpa); } @@ -222,7 +222,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .ADD = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| { fatal("unable to watch {f}: {s}", .{ path, @errorName(err) }); }; } @@ -275,7 +275,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .REMOVE = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| switch (err) { error.FileNotFound => {}, // Expected, harmless. else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }), }; @@ -353,7 +353,7 @@ const Os = switch (builtin.os.tag) { // The following code is a drawn out NtCreateFile call. (mostly adapted from Io.Dir.makeOpenDirAccessMaskW) // It's necessary in order to get the specific flags that are required when calling ReadDirectoryChangesW. var dir_handle: windows.HANDLE = undefined; - const root_fd = path.root_dir.handle.fd; + const root_fd = path.root_dir.handle.handle; const sub_path = path.subPathOrDot(); const sub_path_w = try windows.sliceToPrefixedFileW(root_fd, sub_path); const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong; @@ -681,9 +681,9 @@ const Os = switch (builtin.os.tag) { if (!gop.found_existing) { const skip_open_dir = path.sub_path.len == 0; const dir_fd = if (skip_open_dir) - path.root_dir.handle.fd + path.root_dir.handle.handle else - posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| { + posix.openat(path.root_dir.handle.handle, path.sub_path, dir_open_flags, 0) catch |err| { fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) }); }; // Empirically the dir has to stay open or else no events are triggered. @@ -750,7 +750,7 @@ const Os = switch (builtin.os.tag) { // to access that data via the dir_fd field. 
const path = w.dir_table.keys()[i]; const dir_fd = if (path.sub_path.len == 0) - path.root_dir.handle.fd + path.root_dir.handle.handle else handles.items(.dir_fd)[i]; assert(dir_fd != -1); @@ -761,7 +761,7 @@ const Os = switch (builtin.os.tag) { const last_dir_fd = fd: { const last_path = w.dir_table.keys()[handles.len - 1]; const last_dir_fd = if (last_path.sub_path.len == 0) - last_path.root_dir.handle.fd + last_path.root_dir.handle.handle else handles.items(.dir_fd)[handles.len - 1]; assert(last_dir_fd != -1); diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 8e71f648e2..a9b4775772 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -527,6 +527,14 @@ pub fn writerStreaming(file: File, io: Io, buffer: []u8) Writer { return .initStreaming(file, io, buffer); } +/// Equivalent to creating a streaming writer, writing `bytes`, and then flushing. +pub fn writeStreamingAll(file: File, io: Io, bytes: []const u8) Writer.Error!void { + var index: usize = 0; + while (index < bytes.len) { + index += try io.vtable.fileWriteStreaming(io.userdata, file, &.{}, &.{bytes[index..]}, 1); + } +} + pub const LockError = error{ SystemResources, FileLocksUnsupported, diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index fb76002201..124f886515 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2361,7 +2361,7 @@ fn dirCreateFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -2670,7 +2670,7 @@ fn dirOpenFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when opening procfs files. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -3287,7 +3287,7 @@ fn dirRealPathPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, out_b .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -5548,7 +5548,6 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: File, data: [][]u8) File. switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. @@ -5672,7 +5671,6 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: File, data: [][]u8, offs switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. 
@@ -6312,7 +6310,6 @@ fn fileWriteStreaming( switch (e) { .INVAL => return error.InvalidArgument, .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called. diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 3f25bc6a26..63ae6d93a0 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -2835,7 +2835,7 @@ test "discarding sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [256]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2857,7 +2857,7 @@ test "allocating sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2881,7 +2881,7 @@ test sendFileReading { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index 5818f6c3f7..c9ed0d3284 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -278,7 +278,7 @@ test "listen on a unix socket, send bytes, receive bytes" { defer testing.allocator.free(socket_path); const socket_addr = try net.UnixAddress.init(socket_path); - defer std.fs.cwd().deleteFile(socket_path) catch {}; + defer Io.Dir.cwd().deleteFile(socket_path) catch {}; var server = try socket_addr.listen(io, .{}); defer server.socket.close(io); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9f21fe50e7..e731dc18d7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -27,7 +27,7 @@ test "write a file, read it, then delete it" { random.bytes(data[0..]); const tmp_file_name = "temp_test_file.txt"; { - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var file_writer = file.writer(&.{}); @@ -40,7 +40,7 @@ test "write a file, read it, then delete it" { { // Make sure the exclusive flag is honored. 
- try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(tmp_file_name, .{ .exclusive = true })); + try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(io, tmp_file_name, .{ .exclusive = true })); } { @@ -70,7 +70,7 @@ test "File seek ops" { const io = testing.io; const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); try file.writeAll(&([_]u8{0x55} ** 8192)); @@ -96,7 +96,7 @@ test "setEndPos" { defer tmp.cleanup(); const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); // Verify that the file size changes and the file offset is not moved @@ -121,7 +121,7 @@ test "updateTimes" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true }); + var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true }); defer file.close(io); const stat_old = try file.stat(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 102bb59415..8453bc4c81 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); + const file = try Io.Dir.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 473505ac51..444d8da675 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -19,7 +19,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM _ = io; // TODO migrate file system to use std.Io for (keychain_paths) |keychain_path| { - const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { + const bytes = Io.Dir.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { error.StreamTooLong => return error.FileTooBig, else => |e| return e, }; diff --git a/lib/std/crypto/codecs/asn1/test.zig b/lib/std/crypto/codecs/asn1/test.zig index ff854fcbde..3dbedb9f80 100644 --- a/lib/std/crypto/codecs/asn1/test.zig +++ b/lib/std/crypto/codecs/asn1/test.zig @@ -73,8 +73,8 @@ test AllTypes { try std.testing.expectEqualSlices(u8, encoded, buf); // Use this to update test file. 
- // const dir = try std.fs.cwd().openDir("lib/std/crypto/asn1", .{}); - // var file = try dir.createFile(path, .{}); + // const dir = try Io.Dir.cwd().openDir("lib/std/crypto/asn1", .{}); + // var file = try dir.createFile(io, path, .{}); // defer file.close(io); // try file.writeAll(buf); } diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 97741ecb40..5df0eef2d5 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -60,7 +60,7 @@ pub const cpu_context = @import("debug/cpu_context.zig"); /// }; /// /// Only required if `can_unwind == true`. Unwinds a single stack frame, returning the frame's /// /// return address, or 0 if the end of the stack has been reached. -/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) SelfInfoError!usize; +/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) SelfInfoError!usize; /// ``` pub const SelfInfo = if (@hasDecl(root, "debug") and @hasDecl(root.debug, "SelfInfo")) root.debug.SelfInfo @@ -558,9 +558,9 @@ pub fn defaultPanic( stderr.print("{s}\n", .{msg}) catch break :trace; if (@errorReturnTrace()) |t| if (t.index > 0) { - stderr.writeAll("error return context:\n") catch break :trace; + stderr.writeStreamingAll("error return context:\n") catch break :trace; writeStackTrace(t, stderr, tty_config) catch break :trace; - stderr.writeAll("\nstack trace:\n") catch break :trace; + stderr.writeStreamingAll("\nstack trace:\n") catch break :trace; }; writeCurrentStackTrace(.{ .first_address = first_trace_addr orelse @returnAddress(), @@ -575,7 +575,7 @@ pub fn defaultPanic( // A panic happened while trying to print a previous panic message. // We're still holding the mutex but that's fine as we're going to // call abort(). - File.stderr().writeAll("aborting due to recursive panic\n") catch {}; + File.stderr().writeStreamingAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } @@ -960,7 +960,7 @@ const StackIterator = union(enum) { }, }; - fn next(it: *StackIterator) Result { + fn next(it: *StackIterator, io: Io) Result { switch (it.*) { .ctx_first => |context_ptr| { // After the first frame, start actually unwinding. 
@@ -976,7 +976,7 @@ const StackIterator = union(enum) { .di => |*unwind_context| { const di = getSelfDebugInfo() catch unreachable; const di_gpa = getDebugInfoAllocator(); - const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| { + const ret_addr = di.unwindFrame(di_gpa, io, unwind_context) catch |err| { const pc = unwind_context.pc; const fp = unwind_context.getFp(); it.* = .{ .fp = fp }; @@ -1297,7 +1297,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "line_overlaps_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "line_overlaps_page_boundary.zig" }); defer gpa.free(path); @@ -1316,7 +1316,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_ends_on_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_ends_on_page_boundary.zig" }); defer gpa.free(path); @@ -1330,7 +1330,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{}); + const file = try test_dir.dir.createFile(io, "very_long_first_line_spanning_multiple_pages.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" }); defer gpa.free(path); @@ -1356,7 +1356,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_of_newlines.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_of_newlines.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_of_newlines.zig" }); defer gpa.free(path); diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index a0f1188ade..203ee8effb 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; + const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index ae904c0aec..18126a1c29 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 213389bf04..6ed18bcb80 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -29,13 +29,12 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void { } pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol { - _ = io; const module = try si.findModule(gpa, address, .exclusive); defer si.rwlock.unlock(); const vaddr = address - module.load_offset; - const loaded_elf = try module.getLoadedElf(gpa); + const loaded_elf = try module.getLoadedElf(gpa, io); if (loaded_elf.file.dwarf) |*dwarf| { if (!loaded_elf.scanned_dwarf) { dwarf.open(gpa, native_endian) catch |err| switch (err) { @@ -180,7 +179,7 @@ comptime { } } pub const UnwindContext = Dwarf.SelfUnwinder; -pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize { +pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize { comptime assert(can_unwind); { @@ -201,7 +200,7 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error @memset(si.unwind_cache.?, .empty); } - const unwind_sections = try module.getUnwindSections(gpa); + const unwind_sections = try module.getUnwindSections(gpa, io); for (unwind_sections) |*unwind| { if (context.computeRules(gpa, unwind, module.load_offset, null)) |entry| { entry.populate(si.unwind_cache.?); @@ -261,12 +260,12 @@ const Module = struct { }; /// Assumes we already hold an exclusive lock. - fn getUnwindSections(mod: *Module, gpa: Allocator) Error![]Dwarf.Unwind { - if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa); + fn getUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error![]Dwarf.Unwind { + if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa, io); const us = &(mod.unwind.? catch |err| return err); return us.buf[0..us.len]; } - fn loadUnwindSections(mod: *Module, gpa: Allocator) Error!UnwindSections { + fn loadUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error!UnwindSections { var us: UnwindSections = .{ .buf = undefined, .len = 0, @@ -284,7 +283,7 @@ const Module = struct { } else { // There is no `.eh_frame_hdr` section. There may still be an `.eh_frame` or `.debug_frame` // section, but we'll have to load the binary to get at it. - const loaded = try mod.getLoadedElf(gpa); + const loaded = try mod.getLoadedElf(gpa, io); // If both are present, we can't just pick one -- the info could be split between them. // `.debug_frame` is likely to be the more complete section, so we'll prioritize that one. 
if (loaded.file.debug_frame) |*debug_frame| { @@ -325,7 +324,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +333,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 15da616f3b..db8e5334e6 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index f0ac30cca2..9874efd497 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(io, path, .{}); + break :res Io.Dir.cwd().openFile(io, path, .{}); } else res: { const self_dir = std.process.executableDirPathAlloc(io, gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(io, abs_path, .{}); + break :res Io.Dir.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 7db177ad70..a1801d00d0 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -160,7 +160,7 @@ pub const ElfDynLib = struct { fn openPath(path: []const u8, io: Io) !Io.Dir { if (path.len == 0) return error.NotDir; var parts = std.mem.tokenizeScalar(u8, path, '/'); - var parent = if (path[0] == '/') try std.fs.cwd().openDir("/", .{}) else std.fs.cwd(); + var parent = if (path[0] == '/') try Io.Dir.cwd().openDir("/", .{}) else Io.Dir.cwd(); while (parts.next()) |part| { const child = try parent.openDir(part, .{}); parent.close(io); @@ -174,7 +174,7 @@ pub const ElfDynLib = struct { while (paths.next()) |p| { var dir = openPath(p) catch continue; defer dir.close(io); - const fd = posix.openat(dir.fd, file_name, .{ + const fd = posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch continue; @@ -184,9 +184,9 @@ pub const ElfDynLib = struct { } fn resolveFromParent(io: Io, dir_path: []const u8, file_name: []const u8) ?posix.fd_t { - var 
dir = std.fs.cwd().openDir(dir_path, .{}) catch return null; + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch return null; defer dir.close(io); - return posix.openat(dir.fd, file_name, .{ + return posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch null; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index f4bdecf89d..aab86d40a6 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -46,7 +46,7 @@ const PathType = enum { // The final path may not actually exist which would cause realpath to fail. // So instead, we get the path of the dir and join it with the relative path. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); return fs.path.joinZ(allocator, &.{ dir_path, relative_path }); } }.transform, @@ -55,7 +55,7 @@ const PathType = enum { // Any drive absolute path (C:\foo) can be converted into a UNC path by // using '127.0.0.1' as the server name and '<drive letter>$' as the share name. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); const windows_path_type = windows.getWin32PathType(u8, dir_path); switch (windows_path_type) { .unc_absolute => return fs.path.joinZ(allocator, &.{ dir_path, relative_path }), @@ -256,7 +256,7 @@ fn testReadLinkW(allocator: mem.Allocator, dir: Dir, target_path: []const u8, sy const target_path_w = try std.unicode.wtf8ToWtf16LeAlloc(allocator, target_path); defer allocator.free(target_path_w); // Calling the W functions directly requires the path to be NT-prefixed - const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.fd, symlink_path); + const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.handle, symlink_path); const wtf16_buffer = try allocator.alloc(u16, target_path_w.len); defer allocator.free(wtf16_buffer); const actual = try dir.readLinkW(symlink_path_w.span(), wtf16_buffer); @@ -288,9 +288,11 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { var symlink: Dir = switch (builtin.target.os.tag) { .windows => windows_symlink: { - const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.fd, "symlink"); + const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.handle, "symlink"); - var handle: windows.HANDLE = undefined; + var result: Dir = .{ + .handle = undefined, + }; const path_len_bytes = @as(u16, @intCast(sub_path_w.span().len * 2)); var nt_name = windows.UNICODE_STRING{ @@ -300,26 +302,16 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { }; var attr: windows.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), - .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.fd, - .Attributes = .{}, + .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.handle, + .Attributes = 0, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var io_status_block: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtCreateFile( - &handle, - .{ - .SPECIFIC = .{ .FILE_DIRECTORY = .{ - .READ_EA = true, - .TRAVERSE = true, - .READ_ATTRIBUTES = true, - } }, - .STANDARD = .{ - .RIGHTS = .READ, - .SYNCHRONIZE = true, - }, - }, + &result.handle, + windows.STANDARD_RIGHTS_READ | windows.FILE_READ_ATTRIBUTES | windows.FILE_READ_EA | windows.SYNCHRONIZE | 
windows.FILE_TRAVERSE, &attr, &io_status_block, null, @@ -337,7 +329,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { ); switch (rc) { - .SUCCESS => break :windows_symlink .{ .fd = handle }, + .SUCCESS => break :windows_symlink result, else => return windows.unexpectedStatus(rc), } }, @@ -351,8 +343,8 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { .ACCMODE = .RDONLY, .CLOEXEC = true, }; - const fd = try posix.openatZ(ctx.dir.fd, &sub_path_c, flags, 0); - break :linux_symlink Dir{ .fd = fd }; + const fd = try posix.openatZ(ctx.dir.handle, &sub_path_c, flags, 0); + break :linux_symlink .{ .handle = fd }; }, else => unreachable, }; @@ -456,7 +448,7 @@ test "openDirAbsolute" { test "openDir cwd parent '..'" { const io = testing.io; - var dir = fs.cwd().openDir("..", .{}) catch |err| { + var dir = Io.Dir.cwd().openDir("..", .{}) catch |err| { if (native_os == .wasi and err == error.PermissionDenied) { return; // This is okay. WASI disallows escaping from the fs sandbox } @@ -534,7 +526,7 @@ test "Dir.Iterator" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -570,7 +562,7 @@ test "Dir.Iterator many entries" { var buf: [4]u8 = undefined; // Enough to store "1024". while (i < num) : (i += 1) { const name = try std.fmt.bufPrint(&buf, "{}", .{i}); - const file = try tmp_dir.dir.createFile(name, .{}); + const file = try tmp_dir.dir.createFile(io, name, .{}); file.close(io); } @@ -603,7 +595,7 @@ test "Dir.Iterator twice" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -638,7 +630,7 @@ test "Dir.Iterator reset" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. 
- const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -769,7 +761,7 @@ test "readFileAlloc" { var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); - var file = try tmp_dir.dir.createFile("test_file", .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true }); defer file.close(io); const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); @@ -843,7 +835,7 @@ test "directory operations on files" { const test_file_name = try ctx.transformPath("test_file"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try testing.expectError(error.PathAlreadyExists, ctx.dir.makeDir(test_file_name)); @@ -876,7 +868,7 @@ test "file operations on directories" { try ctx.dir.makeDir(test_dir_name); - try testing.expectError(error.IsDir, ctx.dir.createFile(test_dir_name, .{})); + try testing.expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{})); try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); switch (native_os) { .dragonfly, .netbsd => { @@ -969,7 +961,7 @@ test "Dir.rename files" { // Renaming files const test_file_name = try ctx.transformPath("test_file"); const renamed_test_file_name = try ctx.transformPath("test_file_renamed"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try ctx.dir.rename(test_file_name, renamed_test_file_name); @@ -983,7 +975,7 @@ test "Dir.rename files" { // Rename to existing file succeeds const existing_file_path = try ctx.transformPath("existing_file"); - var existing_file = try ctx.dir.createFile(existing_file_path, .{ .read = true }); + var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true }); existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); @@ -1017,7 +1009,7 @@ test "Dir.rename directories" { var dir = try ctx.dir.openDir(test_dir_renamed_path, .{}); // Put a file in the directory - var file = try dir.createFile("test_file", .{ .read = true }); + var file = try dir.createFile(io, "test_file", .{ .read = true }); file.close(io); dir.close(io); @@ -1070,7 +1062,7 @@ test "Dir.rename directory onto non-empty dir" { try ctx.dir.makeDir(test_dir_path); var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{}); - var file = try target_dir.createFile("test_file", .{ .read = true }); + var file = try target_dir.createFile(io, "test_file", .{ .read = true }); file.close(io); target_dir.close(io); @@ -1094,7 +1086,7 @@ test "Dir.rename file <-> dir" { const test_file_path = try ctx.transformPath("test_file"); const test_dir_path = try ctx.transformPath("test_dir"); - var file = try ctx.dir.createFile(test_file_path, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true }); file.close(io); try ctx.dir.makeDir(test_dir_path); try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path)); @@ -1115,7 +1107,7 @@ test "rename" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir1.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true }); 
file.close(io); try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); @@ -1149,7 +1141,7 @@ test "renameAbsolute" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try fs.renameAbsolute( try fs.path.join(allocator, &.{ base_path, test_file_name }), @@ -1454,7 +1446,7 @@ test "writev, readv" { var write_vecs: [2][]const u8 = .{ line1, line2 }; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writerStreaming(&.{}); @@ -1484,7 +1476,7 @@ test "pwritev, preadv" { var buf2: [line2.len]u8 = undefined; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writer(&.{}); @@ -1584,14 +1576,14 @@ test "sendfile" { const line2 = "second line\n"; var vecs = [_][]const u8{ line1, line2 }; - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); { var fw = src_file.writer(&.{}); try fw.interface.writeVecAll(&vecs); } - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); const header1 = "header1\n"; @@ -1627,12 +1619,12 @@ test "sendfile with buffered data" { var dir = try tmp.dir.openDir("os_test_tmp", .{}); defer dir.close(io); - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); try src_file.writeAll("AAAABBBB"); - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); var src_buffer: [32]u8 = undefined; @@ -1718,10 +1710,10 @@ test "open file with exclusive nonblocking lock twice" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1735,10 +1727,10 @@ test "open file with shared and exclusive nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock 
= .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1752,10 +1744,10 @@ test "open file with exclusive and shared nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1769,13 +1761,13 @@ test "open file with exclusive lock twice, make sure second lock waits" { const io = ctx.io; const filename = try ctx.transformPath("file_lock_test.txt"); - const file = try ctx.dir.createFile(filename, .{ .lock = .exclusive }); + const file = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive }); errdefer file.close(io); const S = struct { fn checkFn(dir: *Io.Dir, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void { started.set(); - const file1 = try dir.createFile(path, .{ .lock = .exclusive }); + const file1 = try dir.createFile(io, path, .{ .lock = .exclusive }); locked.set(); file1.close(io); @@ -1847,13 +1839,13 @@ test "read from locked file" { const filename = try ctx.transformPath("read_lock_file_test.txt"); { - const f = try ctx.dir.createFile(filename, .{ .read = true }); + const f = try ctx.dir.createFile(io, filename, .{ .read = true }); defer f.close(io); var buffer: [1]u8 = undefined; _ = try f.read(&buffer); } { - const f = try ctx.dir.createFile(filename, .{ + const f = try ctx.dir.createFile(io, filename, .{ .read = true, .lock = .exclusive, }); @@ -2037,7 +2029,7 @@ test "'.' and '..' 
in Io.Dir functions" { var created_subdir = try ctx.dir.openDir(subdir_path, .{}); created_subdir.close(io); - const created_file = try ctx.dir.createFile(file_path, .{}); + const created_file = try ctx.dir.createFile(io, file_path, .{}); created_file.close(io); try ctx.dir.access(file_path, .{}); @@ -2103,7 +2095,7 @@ test "chmod" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{ .mode = 0o600 }); + const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); defer file.close(io); try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777); @@ -2127,7 +2119,7 @@ test "chown" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{}); + const file = try tmp.dir.createFile(io, "test_file", .{}); defer file.close(io); try file.chown(null, null); @@ -2228,7 +2220,7 @@ test "read file non vectored" { const contents = "hello, world!\n"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2260,7 +2252,7 @@ test "seek keeping partial buffer" { const contents = "0123456789"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2321,7 +2313,7 @@ test "seekTo flushes buffered data" { const contents = "data"; - const file = try tmp.dir.createFile("seek.bin", .{ .read = true }); + const file = try tmp.dir.createFile(io, "seek.bin", .{ .read = true }); defer file.close(io); { var buf: [16]u8 = undefined; @@ -2350,7 +2342,7 @@ test "File.Writer sendfile with buffered contents" { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); - const out = try tmp_dir.dir.createFile("b", .{}); + const out = try tmp_dir.dir.createFile(io, "b", .{}); defer out.close(io); var in_buf: [2]u8 = undefined; @@ -2397,7 +2389,7 @@ test "readlinkat" { // create a symbolic link if (native_os == .windows) { std.os.windows.CreateSymbolicLink( - tmp.dir.fd, + tmp.dir.handle, &[_]u16{ 'l', 'i', 'n', 'k' }, &[_:0]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' }, false, @@ -2407,7 +2399,7 @@ test "readlinkat" { else => return err, }; } else { - try posix.symlinkat("file.txt", tmp.dir.fd, "link"); + try posix.symlinkat("file.txt", tmp.dir.handle, "link"); } // read the link diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index c7d3f35d40..0972a302da 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -1991,7 +1991,7 @@ test "writev/fsync/readv" { defer tmp.cleanup(); const path = "test_io_uring_writev_fsync_readv"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2062,7 +2062,7 @@ test "write/read" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_write_read"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2110,12 +2110,12 @@ test "splice/read" { var tmp = 
std.testing.tmpDir(.{}); const path_src = "test_io_uring_splice_src"; - const file_src = try tmp.dir.createFile(path_src, .{ .read = true, .truncate = true }); + const file_src = try tmp.dir.createFile(io, path_src, .{ .read = true, .truncate = true }); defer file_src.close(io); const fd_src = file_src.handle; const path_dst = "test_io_uring_splice_dst"; - const file_dst = try tmp.dir.createFile(path_dst, .{ .read = true, .truncate = true }); + const file_dst = try tmp.dir.createFile(io, path_dst, .{ .read = true, .truncate = true }); defer file_dst.close(io); const fd_dst = file_dst.handle; @@ -2185,7 +2185,7 @@ test "write_fixed/read_fixed" { defer tmp.cleanup(); const path = "test_io_uring_write_read_fixed"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2306,7 +2306,7 @@ test "close" { defer tmp.cleanup(); const path = "test_io_uring_close"; - const file = try tmp.dir.createFile(path, .{}); + const file = try tmp.dir.createFile(io, path, .{}); errdefer file.close(io); const sqe_close = try ring.close(0x44444444, file.handle); @@ -2652,7 +2652,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_io_uring_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2699,7 +2699,7 @@ test "statx" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_statx"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2969,7 +2969,7 @@ test "renameat" { // Write old file with data - const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 }); + const old_file = try tmp.dir.createFile(io, old_path, .{ .truncate = true, .mode = 0o666 }); defer old_file.close(io); try old_file.writeAll("hello"); @@ -3028,7 +3028,7 @@ test "unlinkat" { // Write old file with data - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit unlinkat @@ -3125,7 +3125,7 @@ test "symlinkat" { const path = "test_io_uring_symlinkat"; const link_path = "test_io_uring_symlinkat_link"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit symlinkat @@ -3177,7 +3177,7 @@ test "linkat" { // Write file with data - const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 }); + const first_file = try tmp.dir.createFile(io, first_path, .{ .truncate = true, .mode = 0o666 }); defer first_file.close(io); try first_file.writeAll("hello"); diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index 39606ddfac..d7cfb4e138 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -18,7 +18,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const 
file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try expect((try file.stat()).size == 0); @@ -85,7 +85,7 @@ test "statx" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: linux.Statx = undefined; @@ -121,7 +121,7 @@ test "fadvise" { defer tmp.cleanup(); const tmp_file_name = "temp_posix_fadvise.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: [2048]u8 = undefined; diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 6b5b678b20..ba5282256f 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -4639,8 +4639,8 @@ pub fn wToPrefixedFileW(dir: ?HANDLE, path: [:0]const u16) Wtf16ToPrefixedFileWE break :path_to_get path; } // We can also skip GetFinalPathNameByHandle if the handle matches - // the handle returned by fs.cwd() - if (dir.? == std.fs.cwd().fd) { + // the handle returned by Io.Dir.cwd() + if (dir.? == Io.Dir.cwd().handle) { break :path_to_get path; } // At this point, we know we have a relative path that had too many diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 392987ec50..f4aa970413 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -15,15 +15,16 @@ //! deal with the exception. const builtin = @import("builtin"); -const root = @import("root"); +const native_os = builtin.os.tag; + const std = @import("std.zig"); +const Io = std.Io; const mem = std.mem; const fs = std.fs; -const max_path_bytes = fs.max_path_bytes; +const max_path_bytes = std.fs.max_path_bytes; const maxInt = std.math.maxInt; const cast = std.math.cast; const assert = std.debug.assert; -const native_os = builtin.os.tag; const page_size_min = std.heap.page_size_min; test { @@ -797,7 +798,6 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize { .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .CANCELED => return error.Canceled, .BADF => return error.NotOpenForReading, // Can be a race condition. @@ -917,7 +917,6 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize { .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. @@ -985,7 +984,8 @@ pub fn openZ(file_path: [*:0]const u8, flags: O, perm: mode_t) OpenError!fd_t { .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + // Can happen on Linux when opening procfs files. 
+ .SRCH => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -1560,7 +1560,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: mode_t) MakeDirError!void { pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(dir_path_w, .{ - .dir = fs.cwd().fd, + .dir = Io.Dir.cwd().handle, .access_mask = .{ .STANDARD = .{ .SYNCHRONIZE = true }, .GENERIC = .{ .READ = true }, diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 0071a72a26..dc63be6e14 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -148,7 +148,7 @@ test "linkat with different directories" { try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory - try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); + try posix.linkat(tmp.dir.handle, target_name, subdir.handle, link_name, 0); const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); @@ -164,7 +164,7 @@ test "linkat with different directories" { } // Test 2: remove link - try posix.unlinkat(subdir.fd, link_name, 0); + try posix.unlinkat(subdir.handle, link_name, 0); _, const elink = try getLinkInfo(efd.handle); try testing.expectEqual(@as(posix.nlink_t, 1), elink); } @@ -373,7 +373,7 @@ test "mmap" { // Create a file used for testing mmap() calls with a file descriptor { - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); var stream = file.writer(&.{}); @@ -444,7 +444,7 @@ test "fcntl" { const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); // Note: The test assumes createFile opens the file with CLOEXEC @@ -495,7 +495,7 @@ test "fsync" { defer tmp.cleanup(); const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); try posix.fsync(file.handle); @@ -617,7 +617,7 @@ test "dup & dup2" { defer tmp.cleanup(); { - var file = try tmp.dir.createFile("os_dup_test", .{}); + var file = try tmp.dir.createFile(io, "os_dup_test", .{}); defer file.close(io); var duped = Io.File{ .handle = try posix.dup(file.handle) }; @@ -659,7 +659,7 @@ test "writev longer than IOV_MAX" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwritev", .{}); + var file = try tmp.dir.createFile(io, "pwritev", .{}); defer file.close(io); const iovecs = [_]posix.iovec_const{.{ .base = "a", .len = 1 }} ** (posix.IOV_MAX + 1); @@ -684,7 +684,7 @@ test "POSIX file locking with fcntl" { defer tmp.cleanup(); // Create a temporary lock file - var file = try tmp.dir.createFile("lock", .{ .read = true }); + var file = try tmp.dir.createFile(io, "lock", .{ .read = true }); defer file.close(io); try file.setEndPos(2); const fd = file.handle; @@ -881,7 +881,7 @@ test "isatty" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); defer file.close(io); try expectEqual(posix.isatty(file.handle), false); @@ -893,7 +893,7 @@ test "pread with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pread_empty", .{ .read = true }); + var 
file = try tmp.dir.createFile(io, "pread_empty", .{ .read = true }); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -909,7 +909,7 @@ test "write with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("write_empty", .{}); + var file = try tmp.dir.createFile(io, "write_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -925,7 +925,7 @@ test "pwrite with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwrite_empty", .{}); + var file = try tmp.dir.createFile(io, "pwrite_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -965,35 +965,35 @@ test "fchmodat smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.fd, "regfile", 0o666, 0)); + try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.handle, "regfile", 0o666, 0)); const fd = try posix.openat( - tmp.dir.fd, + tmp.dir.handle, "regfile", .{ .ACCMODE = .WRONLY, .CREAT = true, .EXCL = true, .TRUNC = true }, 0o644, ); posix.close(fd); - try posix.symlinkat("regfile", tmp.dir.fd, "symlink"); - const sym_mode = try getFileMode(tmp.dir.fd, "symlink"); + try posix.symlinkat("regfile", tmp.dir.handle, "symlink"); + const sym_mode = try getFileMode(tmp.dir.handle, "symlink"); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); - try expectMode(tmp.dir.fd, "regfile", 0o600); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); + try expectMode(tmp.dir.handle, "regfile", 0o600); - try posix.fchmodat(tmp.dir.fd, "symlink", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try expectMode(tmp.dir.fd, "symlink", sym_mode); + try posix.fchmodat(tmp.dir.handle, "symlink", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", sym_mode); var test_link = true; - posix.fchmodat(tmp.dir.fd, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { + posix.fchmodat(tmp.dir.handle, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { error.OperationNotSupported => test_link = false, else => |e| return e, }; if (test_link) - try expectMode(tmp.dir.fd, "symlink", 0o600); - try expectMode(tmp.dir.fd, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", 0o600); + try expectMode(tmp.dir.handle, "regfile", 0o640); } const CommonOpenFlags = packed struct { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index b774303901..33faeef061 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -677,7 +677,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); if (self.cwd_dir) |cwd| { - posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); + posix.fchdir(cwd.handle) catch |err| forkChildErrReport(err_pipe[1], err); } else if (self.cwd) |cwd| { posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } diff --git a/lib/std/std.zig b/lib/std/std.zig index 106811859b..1690c0575c 100644 --- a/lib/std/std.zig +++ b/lib/std/std.zig @@ -114,7 +114,7 @@ pub const options: Options = if (@hasDecl(root, 
"std_options")) root.std_options pub const Options = struct { enable_segfault_handler: bool = debug.default_enable_segfault_handler, - /// Function used to implement `std.fs.cwd` for WASI. + /// Function used to implement `std.Io.Dir.cwd` for WASI. wasiCwd: fn () os.wasi.fd_t = os.defaultWasiCwd, /// The current log level. diff --git a/lib/std/tar.zig b/lib/std/tar.zig index d861314fec..8a0bbb342f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -610,7 +610,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } }, .file => { - if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| { + if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { defer fs_file.close(io); var file_writer = fs_file.writer(&file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); @@ -638,12 +638,12 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } } -fn createDirAndFile(dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { - const fs_file = dir.createFile(file_name, .{ .exclusive = true, .mode = mode }) catch |err| { +fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { + const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { try dir.makePath(dir_name); - return try dir.createFile(file_name, .{ .exclusive = true, .mode = mode }); + return try dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }); } } return err; @@ -880,9 +880,9 @@ test "create file and symlink" { var root = testing.tmpDir(.{}); defer root.cleanup(); - var file = try createDirAndFile(root.dir, "file1", default_mode); + var file = try createDirAndFile(io, root.dir, "file1", default_mode); file.close(io); - file = try createDirAndFile(root.dir, "a/b/c/file2", default_mode); + file = try createDirAndFile(io, root.dir, "a/b/c/file2", default_mode); file.close(io); createDirAndSymlink(root.dir, "a/b/c/file2", "symlink1") catch |err| { @@ -894,7 +894,7 @@ test "create file and symlink" { // Danglink symlnik, file created later try createDirAndSymlink(root.dir, "../../../g/h/i/file4", "j/k/l/symlink3"); - file = try createDirAndFile(root.dir, "g/h/i/file4", default_mode); + file = try createDirAndFile(io, root.dir, "g/h/i/file4", default_mode); file.close(io); } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 19038543a6..99d67ec132 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -628,7 +628,7 @@ pub fn tmpDir(opts: Io.Dir.OpenOptions) TmpDir { var sub_path: [TmpDir.sub_path_len]u8 = undefined; _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes); - const cwd = std.fs.cwd(); + const cwd = Io.Dir.cwd(); var cache_dir = cwd.makeOpenPath(".zig-cache", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir"); defer cache_dir.close(io); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index c8bde2ab02..80317850df 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -57,7 +57,7 @@ pub fn parse( } } - const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); + const contents = try Io.Dir.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); defer allocator.free(contents); var it = std.mem.tokenizeScalar(u8, contents, '\n'); @@ 
-337,7 +337,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F // search in reverse order const search_path_untrimmed = search_paths.items[search_paths.items.len - path_i - 1]; const search_path = std.mem.trimStart(u8, search_path_untrimmed, " "); - var search_dir = fs.cwd().openDir(search_path, .{}) catch |err| switch (err) { + var search_dir = Io.Dir.cwd().openDir(search_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -392,7 +392,7 @@ fn findNativeIncludeDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -440,7 +440,7 @@ fn findNativeCrtDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -508,7 +508,7 @@ fn findNativeKernel32LibDir( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -544,7 +544,7 @@ fn findNativeMsvcIncludeDir( const dir_path = try fs.path.join(allocator, &[_][]const u8{ up2, "include" }); errdefer allocator.free(dir_path); - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 6b6e4fa9f7..dca474020a 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -828,7 +828,7 @@ const MsvcLibDir = struct { try lib_dir_buf.appendSlice("VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); var default_tools_version_buf: [512]u8 = undefined; - const default_tools_version_contents = std.fs.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { + const default_tools_version_contents = Io.Dir.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { return error.PathNotFound; }; var tokenizer = std.mem.tokenizeAny(u8, default_tools_version_contents, " \r\n"); diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index d3bafc16f2..9fa0546c3b 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -1,11 +1,12 @@ const builtin = @import("builtin"); +const native_endian = builtin.cpu.arch.endian(); + const std = @import("../std.zig"); const mem = std.mem; const elf = std.elf; const fs = std.fs; const assert = std.debug.assert; const Target = std.Target; -const native_endian = builtin.cpu.arch.endian(); const posix = std.posix; const Io = std.Io; @@ -69,7 +70,7 @@ pub fn getExternalExecutor( if (os_match and cpu_ok) native: { if (options.link_libc) { if (candidate.dynamic_linker.get()) |candidate_dl| { - fs.cwd().access(candidate_dl, .{}) catch { + Io.Dir.cwd().access(candidate_dl, .{}) catch { bad_result = .{ .bad_dl = candidate_dl }; break :native; }; @@ 
-710,6 +711,7 @@ fn abiAndDynamicLinkerFromFile( error.SystemResources, error.FileSystem, error.SymLinkLoop, + error.Canceled, error.Unexpected, => |e| return e, }; diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig index 9bb4e34e3b..4ff6846a09 100644 --- a/lib/std/zig/system/darwin/macos.zig +++ b/lib/std/zig/system/darwin/macos.zig @@ -1,9 +1,10 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; - const Target = std.Target; /// Detect macOS version. @@ -54,7 +55,7 @@ pub fn detect(target_os: *Target.Os) !void { // approx. 4 times historical file size var buf: [2048]u8 = undefined; - if (std.fs.cwd().readFile(path, &buf)) |bytes| { + if (Io.Dir.cwd().readFile(path, &buf)) |bytes| { if (parseSystemVersion(bytes)) |ver| { // never return non-canonical `10.(16+)` if (!(ver.major == 10 and ver.minor >= 16)) { diff --git a/lib/std/zip.zig b/lib/std/zip.zig index 0ca77c98a1..9d08847092 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -564,9 +564,9 @@ pub const Iterator = struct { defer parent_dir.close(io); const basename = std.fs.path.basename(filename); - break :blk try parent_dir.createFile(basename, .{ .exclusive = true }); + break :blk try parent_dir.createFile(io, basename, .{ .exclusive = true }); } - break :blk try dest.createFile(filename, .{ .exclusive = true }); + break :blk try dest.createFile(io, filename, .{ .exclusive = true }); }; defer out_file.close(io); var out_file_buffer: [1024]u8 = undefined; diff --git a/src/Compilation.zig b/src/Compilation.zig index 24b994f608..5f15ef5f74 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -450,7 +450,7 @@ pub const Path = struct { const dir = switch (p.root) { .none => { const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd); - return .{ fs.cwd(), cwd_sub_path }; + return .{ Io.Dir.cwd(), cwd_sub_path }; }, .zig_lib => dirs.zig_lib.handle, .global_cache => dirs.global_cache.handle, @@ -723,7 +723,7 @@ pub const Directories = struct { pub fn deinit(dirs: *Directories, io: Io) void { // The local and global caches could be the same. - const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd; + const close_local = dirs.local_cache.handle.handle != dirs.global_cache.handle.handle; dirs.global_cache.handle.close(io); if (close_local) dirs.local_cache.handle.close(io); @@ -814,7 +814,7 @@ pub const Directories = struct { return .{ .path = if (std.mem.eql(u8, name, ".")) null else name, .handle = .{ - .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), + .handle = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), }, }; } @@ -824,8 +824,8 @@ pub const Directories = struct { }; const nonempty_path = if (path.len == 0) "." 
else path; const handle_or_err = switch (thing) { - .@"zig lib" => fs.cwd().openDir(nonempty_path, .{}), - .@"global cache", .@"local cache" => fs.cwd().makeOpenPath(nonempty_path, .{}), + .@"zig lib" => Io.Dir.cwd().openDir(nonempty_path, .{}), + .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(nonempty_path, .{}), }; return .{ .path = if (path.len == 0) null else path, @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; + const file = Io.Dir.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -2109,7 +2109,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, }, }; // These correspond to std.zig.Server.Message.PathPrefix. - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(options.dirs.zig_lib); cache.addPrefix(options.dirs.local_cache); cache.addPrefix(options.dirs.global_cache); @@ -5220,7 +5220,7 @@ fn createDepFile( binfile: Cache.Path, ) anyerror!void { var buf: [4096]u8 = undefined; - var af = try std.fs.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); + var af = try Io.Dir.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); defer af.deinit(); comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?; @@ -5284,7 +5284,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { }; } - var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| { + var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| { return comp.lockAndSetMiscFailure( .docs_copy, "unable to create '{f}/sources.tar': {s}", diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 3bd05120ff..8a30529bc5 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -383,14 +383,14 @@ pub fn run(f: *Fetch) RunError!void { }, .remote => |remote| remote, .path_or_url => |path_or_url| { - if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { + if (Io.Dir.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { var resource: Resource = .{ .dir = dir }; return f.runResource(path_or_url, &resource, null); } else |dir_err| { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(io, path_or_url, .{})) |file| { + if (Io.Dir.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -1303,7 +1303,7 @@ fn unzip( const random_integer = std.crypto.random.int(u64); zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer); - break cache_root.handle.createFile(&zip_path, .{ + break cache_root.handle.createFile(io, &zip_path, .{ .exclusive = true, .read = true, }) catch |err| switch (err) { @@ -1365,7 +1365,7 @@ fn unpackGitPack(f: 
*Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U { var pack_dir = try out_dir.makeOpenPath(".git", .{}); defer pack_dir.close(io); - var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true }); + var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true }); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = b: { @@ -1376,7 +1376,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U break :b pack_file_writer.moveToReader(io); }; - var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true }); + var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -2235,7 +2235,7 @@ test "set executable bit based on file content" { fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void { //const tarball_name = "duplicate_paths_excluded.tar.gz"; const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name); - var tmp_file = try dir.createFile(tarball_name, .{}); + var tmp_file = try dir.createFile(io, tarball_name, .{}); defer tmp_file.close(io); try tmp_file.writeAll(tarball_content); } diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index ccae9440e2..7b08a89cae 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -264,7 +264,7 @@ pub const Repository = struct { try repository.odb.seekOid(entry.oid); const file_object = try repository.odb.readObject(); if (file_object.type != .blob) return error.InvalidFile; - var file = dir.createFile(entry.name, .{ .exclusive = true }) catch |e| { + var file = dir.createFile(io, entry.name, .{ .exclusive = true }) catch |e| { const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name }); errdefer diagnostics.allocator.free(file_name); try diagnostics.errors.append(diagnostics.allocator, .{ .unable_to_create_file = .{ @@ -1584,14 +1584,14 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u var git_dir = testing.tmpDir(.{}); defer git_dir.cleanup(); - var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true }); + var pack_file = try git_dir.dir.createFile(io, "testrepo.pack", .{ .read = true }); defer pack_file.close(io); try pack_file.writeAll(testrepo_pack); var pack_file_buffer: [2000]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); - var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true }); + var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -1714,20 +1714,20 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); + var pack_file = try Io.Dir.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); const commit = try Oid.parse(format, args[3]); - var worktree = try std.fs.cwd().makeOpenPath(args[4], .{}); + var worktree = try Io.Dir.cwd().makeOpenPath(args[4], .{}); defer worktree.close(io); var git_dir = try worktree.makeOpenPath(".git", .{}); defer git_dir.close(io); 
std.debug.print("Starting index...\n", .{}); - var index_file = try git_dir.createFile("idx", .{ .read = true }); + var index_file = try git_dir.createFile(io, "idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [4096]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 45b1302138..9a75b2096e 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -170,7 +170,7 @@ pub fn updateFile( // version. Likewise if we're working on AstGen and another process asks for // the cached file, they'll get it. const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, @@ -196,7 +196,7 @@ pub fn updateFile( cache_directory, }); } - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cb4fe0459f..4fc58c2c4b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1,19 +1,22 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const log = std.log.scoped(.codegen); const math = std.math; const DW = std.dwarf; - const Builder = std.zig.llvm.Builder; + +const build_options = @import("build_options"); const llvm = if (build_options.have_llvm) @import("llvm/bindings.zig") else @compileError("LLVM unavailable"); + const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); -const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); @@ -964,7 +967,7 @@ pub const Object = struct { if (std.mem.eql(u8, path, "-")) { o.builder.dump(); } else { - o.builder.printToFilePath(std.fs.cwd(), path) catch |err| { + o.builder.printToFilePath(Io.Dir.cwd(), path) catch |err| { log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) }); }; } @@ -978,7 +981,7 @@ pub const Object = struct { o.builder.clearAndFree(); if (options.pre_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -991,7 +994,7 @@ pub const Object = struct { options.post_ir_path == null and options.post_bc_path == null) return; if (options.post_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -2711,7 +2714,7 @@ pub const Object = struct { } fn allocTypeName(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error![:0]const u8 { - var aw: std.Io.Writer.Allocating = .init(o.gpa); + var aw: Io.Writer.Allocating = .init(o.gpa); defer aw.deinit(); ty.print(&aw.writer, pt, null) catch |err| switch (err) { error.WriteFailed => return error.OutOfMemory, diff --git a/src/fmt.zig b/src/fmt.zig index ce8a31fa4c..36a3833986 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -182,11 +182,11 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
// Mark any excluded files/directories as already seen, // so that they are skipped later during actual processing for (excluded_files.items) |file_path| { - const stat = fs.cwd().statFile(file_path) catch |err| switch (err) { + const stat = Io.Dir.cwd().statFile(file_path) catch |err| switch (err) { error.FileNotFound => continue, // On Windows, statFile does not work for directories error.IsDir => dir: { - var dir = try fs.cwd().openDir(file_path, .{}); + var dir = try Io.Dir.cwd().openDir(file_path, .{}); defer dir.close(io); break :dir try dir.stat(); }, @@ -196,7 +196,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! } for (input_files.items) |file_path| { - try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path); + try fmtPath(&fmt, file_path, check_flag, Io.Dir.cwd(), file_path); } try fmt.stdout_writer.interface.flush(); if (fmt.any_error) { diff --git a/src/introspect.zig b/src/introspect.zig index d2faa9a55c..04ddf47e8a 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -82,7 +82,7 @@ pub fn findZigLibDirFromSelfExe( cwd_path: []const u8, self_exe_path: []const u8, ) error{ OutOfMemory, FileNotFound }!Cache.Directory { - const cwd = fs.cwd(); + const cwd = Io.Dir.cwd(); var cur_path: []const u8 = self_exe_path; while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) { var base_dir = cwd.openDir(dirname, .{}) catch continue; @@ -206,7 +206,7 @@ pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator var cur_dir = cwd; while (true) { const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename }); - if (fs.cwd().access(joined, .{})) |_| { + if (Io.Dir.cwd().access(joined, .{})) |_| { return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename }); } else |err| switch (err) { error.FileNotFound => { diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index 8c5e0afe4b..cfd8d5554c 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -446,7 +446,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -468,7 +468,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -986,7 +986,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -1014,7 +1014,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, 
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.getSoVersion(&target.os), + so_files.dir_path.sub_path, path.sep, lib.name, lib.getSoVersion(&target.os), }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index bec20ff3d4..e3d8ce1f7f 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -681,7 +681,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -703,7 +703,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -775,7 +775,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye try stubs_asm.appendSlice(".text\n"); var sym_i: usize = 0; - var sym_name_buf: std.Io.Writer.Allocating = .init(arena); + var sym_name_buf: Io.Writer.Allocating = .init(arena); var opt_symbol_name: ?[]const u8 = null; var versions_buffer: [32]u8 = undefined; var versions_len: usize = undefined; @@ -796,7 +796,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // twice, which causes a "duplicate symbol" assembler error. 
         var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
 
-        var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
+        var inc_reader: Io.Reader = .fixed(metadata.inclusions);
 
         const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
 
@@ -1130,7 +1130,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
             .lock = man.toOwnedLock(),
             .dir_path = .{
                 .root_dir = comp.dirs.global_cache,
-                .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+                .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
             },
         });
     }
@@ -1156,7 +1156,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
         const so_path: Path = .{
             .root_dir = so_files.dir_path.root_dir,
             .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
-                so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+                so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
             }) catch return comp.setAllocFailure(),
         };
         task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index 005696e1fc..b3ca51e833 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -1,7 +1,8 @@
 const std = @import("std");
+const Io = std.Io;
 const Allocator = std.mem.Allocator;
 const mem = std.mem;
-const path = std.fs.path;
+const path = std.Io.Dir.path;
 const assert = std.debug.assert;
 
 const log = std.log.scoped(.mingw);
@@ -259,7 +260,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
         .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
-    cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
+    cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
     cache.addPrefix(comp.dirs.zig_lib);
     cache.addPrefix(comp.dirs.global_cache);
     defer cache.manifest_dir.close(io);
@@ -304,7 +305,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
         .output = .{ .to_list = .{ .arena = .init(gpa) } },
     };
     defer diagnostics.deinit();
-    var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd());
+    var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, Io.Dir.cwd());
     defer aro_comp.deinit();
 
     aro_comp.target = .fromZigTarget(target.*);
@@ -343,7 +344,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
     }
 
     const members = members: {
-        var aw: std.Io.Writer.Allocating = .init(gpa);
+        var aw: Io.Writer.Allocating = .init(gpa);
         errdefer aw.deinit();
 
         try pp.prettyPrintTokens(&aw.writer, .result_only);
@@ -376,7 +377,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
     errdefer gpa.free(lib_final_path);
 
     {
-        const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true });
+        const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true });
         defer lib_final_file.close(io);
         var buffer: [1024]u8 = undefined;
         var file_writer = lib_final_file.writer(&buffer);
@@ -442,7 +443,7 @@ fn findDef(
     } else {
         try override_path.print(fmt_path, .{ lib_path, lib_name });
     }
-    if (std.fs.cwd().access(override_path.items, .{})) |_| {
+    if (Io.Dir.cwd().access(override_path.items, .{})) |_| {
         return override_path.toOwnedSlice();
     } else |err| switch (err) {
         error.FileNotFound => {},
@@ -459,7 +460,7 @@ fn findDef(
     } else {
         try override_path.print(fmt_path, .{lib_name});
     }
-    if (std.fs.cwd().access(override_path.items, .{})) |_| {
+    if (Io.Dir.cwd().access(override_path.items, .{})) |_| {
         return override_path.toOwnedSlice();
     } else |err| switch (err) {
         error.FileNotFound => {},
@@ -476,7 +477,7 @@ fn findDef(
     } else {
         try override_path.print(fmt_path, .{lib_name});
     }
-    if (std.fs.cwd().access(override_path.items, .{})) |_| {
+    if (Io.Dir.cwd().access(override_path.items, .{})) |_| {
         return override_path.toOwnedSlice();
     } else |err| switch (err) {
         error.FileNotFound => {},
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 67e6a2f903..cb6a80d69d 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -1,9 +1,9 @@
 const std = @import("std");
+const Io = std.Io;
 const Allocator = std.mem.Allocator;
 const mem = std.mem;
 const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
 const assert = std.debug.assert;
 const Version = std.SemanticVersion;
 const Path = std.Build.Cache.Path;
@@ -387,7 +387,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
         .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
-    cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+    cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
     cache.addPrefix(comp.dirs.zig_lib);
     cache.addPrefix(comp.dirs.global_cache);
     defer cache.manifest_dir.close(io);
@@ -409,7 +409,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
             .lock = man.toOwnedLock(),
             .dir_path = .{
                 .root_dir = comp.dirs.global_cache,
-                .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+                .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
             },
         });
     }
@@ -640,7 +640,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
             .lock = man.toOwnedLock(),
             .dir_path = .{
                 .root_dir = comp.dirs.global_cache,
-                .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+                .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
             },
         });
     }
@@ -661,7 +661,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
         const so_path: Path = .{
             .root_dir = so_files.dir_path.root_dir,
             .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
-                so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+                so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
             }) catch return comp.setAllocFailure(),
         };
         task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/link/C.zig b/src/link/C.zig
index 04c92443e5..a001f8fdd9 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -136,7 +136,7 @@ pub fn createEmpty(
     assert(!use_lld);
     assert(!use_llvm);
 
-    const file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+    const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
         // Truncation is done on `flush`.
         .truncate = false,
     });
@@ -792,7 +792,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
     }
 
     const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory;
-    const file = try directory.handle.createFile(emit_h.loc.basename, .{
+    const file = try directory.handle.createFile(io, emit_h.loc.basename, .{
         // We set the end position explicitly below; by not truncating the file, we possibly
         // make it easier on the file system by doing 1 reallocation instead of two.
         .truncate = false,
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index e1d52fb7c4..009e59ed0d 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -631,12 +631,14 @@ fn create(
         else => return error.UnsupportedCOFFArchitecture,
     };
 
+    const io = comp.io;
+
     const coff = try arena.create(Coff);
-    const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{
+    const file = try path.root_dir.handle.createFile(io, path.sub_path, .{
         .read = true,
         .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
     });
-    errdefer file.close(comp.io);
+    errdefer file.close(io);
     coff.* = .{
         .base = .{
             .tag = .coff2,
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 584a50c7f2..53812a37ec 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -313,9 +313,11 @@ pub fn createEmpty(
     const is_obj = output_mode == .Obj;
     const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
 
+    const io = comp.io;
+
     // What path should this ELF linker code output to?
     const sub_path = emit.sub_path;
-    self.base.file = try emit.root_dir.handle.createFile(sub_path, .{
+    self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{
         .truncate = true,
         .read = true,
         .mode = link.File.determineMode(output_mode, link_mode),
diff --git a/src/link/Lld.zig b/src/link/Lld.zig
index 49f6d3f7c7..b25b9da9d9 100644
--- a/src/link/Lld.zig
+++ b/src/link/Lld.zig
@@ -1572,7 +1572,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
     // report a nice error here with the file path if it fails instead of
     // just returning the error code.
     // chmod does not interact with umask, so we use a conservative -rwxr--r-- here.
-    std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) {
+    std.posix.fchmodat(Io.Dir.cwd().handle, full_out_path, 0o744, 0) catch |err| switch (err) {
        error.OperationNotSupported => unreachable, // Not a symlink.
        else => |e| return e,
    };
@@ -1624,7 +1624,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
     const rand_int = std.crypto.random.int(u64);
     const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
 
-    const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{});
+    const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{});
     defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
         log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
     {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index e837cc853a..0f6127e10e 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -219,7 +219,9 @@ pub fn createEmpty(
     };
     errdefer self.base.destroy();
 
-    self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+    const io = comp.io;
+
+    self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
         .truncate = true,
         .read = true,
         .mode = link.File.determineMode(output_mode, link_mode),
@@ -1082,7 +1084,7 @@ fn accessLibPath(
             test_path.clearRetainingCapacity();
             try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
             try checked_paths.append(try arena.dupe(u8, test_path.items));
-            fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+            Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) {
                 error.FileNotFound => continue,
                 else => |e| return e,
             };
@@ -1110,7 +1112,7 @@ fn accessFrameworkPath(
                 ext,
             });
             try checked_paths.append(try arena.dupe(u8, test_path.items));
-            fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+            Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) {
                 error.FileNotFound => continue,
                 else => |e| return e,
             };
@@ -1191,7 +1193,7 @@ fn parseDependentDylibs(self: *MachO) !void {
                    try test_path.print("{s}{s}", .{ path, ext });
                }
                try checked_paths.append(try arena.dupe(u8, test_path.items));
-                fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+                Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) {
                    error.FileNotFound => continue,
                    else => |e| return e,
                };
@@ -3289,7 +3291,7 @@ pub fn reopenDebugInfo(self: *MachO) !void {
     var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{});
     defer d_sym_bundle.close(io);
 
-    self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{
+    self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{
         .truncate = false,
         .read = true,
     });
@@ -4370,7 +4372,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
 // The file/property is also available with vendored libc.
 fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
     const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
-    const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
+    const contents = try Io.Dir.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
     const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{});
     if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string;
     return error.SdkVersionFailure;
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 5f9a9ecac9..814faf234a 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -247,7 +247,7 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
 }
 
 pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void {
-    const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32)));
+    const inner = try Io.Dir.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32)));
     self.entitlements = .{ .inner = inner };
 }
 
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7e28dc0a8b..d13caaa315 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -33,6 +33,7 @@ pub fn createEmpty(
     options: link.File.OpenOptions,
 ) !*Linker {
     const gpa = comp.gpa;
+    const io = comp.io;
     const target = &comp.root_mod.resolved_target.result;
 
     assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve
@@ -78,7 +79,7 @@ pub fn createEmpty(
     };
     errdefer linker.deinit();
 
-    linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+    linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
         .truncate = true,
         .read = true,
     });
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 7ab1e0eb4b..5f89625d56 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2997,7 +2997,9 @@ pub fn createEmpty(
         .named => |name| (try wasm.internString(name)).toOptional(),
     };
 
-    wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+    const io = comp.io;
+
+    wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
         .truncate = true,
         .read = true,
         .mode = if (fs.has_executable_bit)
diff --git a/src/main.zig b/src/main.zig
index b040b6c8ef..67b7384b57 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -713,7 +713,7 @@ const Emit = union(enum) {
         } else e: {
             // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
             if (fs.path.dirname(path)) |dir_path| {
-                var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+                var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| {
                     fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
                 };
                 dir.close(io);
@@ -3304,7 +3304,7 @@ fn buildOutputType(
         } else emit: {
             // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
             if (fs.path.dirname(path)) |dir_path| {
-                var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+                var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| {
                     fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
                 };
                 dir.close(io);
@@ -3389,7 +3389,7 @@ fn buildOutputType(
             // file will not run and this temp file will be leaked. The filename
             // will be a hash of its contents — so multiple invocations of
             // `zig cc -` will result in the same temp file name.
-            var f = try dirs.local_cache.handle.createFile(dump_path, .{});
+            var f = try dirs.local_cache.handle.createFile(io, dump_path, .{});
             defer f.close(io);
 
             // Re-using the hasher from Cache, since the functional requirements
@@ -4773,7 +4773,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
     var ok_count: usize = 0;
 
     for (template_paths) |template_path| {
-        if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
+        if (templates.write(arena, Io.Dir.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
             std.log.info("created {s}", .{template_path});
             ok_count += 1;
         } else |err| switch (err) {
@@ -5227,7 +5227,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
     if (system_pkg_dir_path) |p| {
         job_queue.global_cache = .{
             .path = p,
-            .handle = fs.cwd().openDir(p, .{}) catch |err| {
+            .handle = Io.Dir.cwd().openDir(p, .{}) catch |err| {
                 fatal("unable to open system package directory '{s}': {s}", .{
                     p, @errorName(err),
                 });
@@ -5823,7 +5823,7 @@ const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true,
 /// Initialize the arguments from a Response File. "*.rsp"
 fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile {
     const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit
-    const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes));
+    const cmd_line = try Io.Dir.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes));
     errdefer allocator.free(cmd_line);
 
     return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line);
@@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
     const display_path = zig_source_path orelse "<stdin>";
     const source: [:0]const u8 = s: {
         var f = if (zig_source_path) |p| file: {
-            break :file fs.cwd().openFile(io, p, .{}) catch |err| {
+            break :file Io.Dir.cwd().openFile(io, p, .{}) catch |err| {
                 fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
             };
         } else Io.File.stdin();
@@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void {
 
     const cache_file = args[0];
 
-    var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| {
+    var f = Io.Dir.cwd().openFile(io, cache_file, .{}) catch |err| {
         fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) });
     };
     defer f.close(io);
@@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
     const new_source_path = args[1];
 
     const old_source = source: {
-        var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err|
+        var f = Io.Dir.cwd().openFile(io, old_source_path, .{}) catch |err|
             fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
         defer f.close(io);
         var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
@@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
            fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
     };
     const new_source = source: {
-        var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err|
+        var f = Io.Dir.cwd().openFile(io, new_source_path, .{}) catch |err|
            fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
        defer f.close(io);
        var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
@@ -6845,7 +6845,7 @@ fn accessFrameworkPath(
                 framework_dir_path, framework_name, framework_name, ext,
             });
             try checked_paths.print("\n {s}", .{test_path.items});
-            fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+            Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) {
                 error.FileNotFound => continue,
                 else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{
                     ext, test_path.items, @errorName(e),
@@ -6957,7 +6957,7 @@ fn cmdFetch(
     var global_cache_directory: Directory = l: {
         const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
         break :l .{
-            .handle = try fs.cwd().makeOpenPath(p, .{}),
+            .handle = try Io.Dir.cwd().makeOpenPath(p, .{}),
             .path = p,
         };
     };
@@ -7260,7 +7260,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
 
     if (options.build_file) |bf| {
         if (fs.path.dirname(bf)) |dirname| {
-            const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+            const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| {
                 fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) });
             };
             return .{
@@ -7272,7 +7272,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
         return .{
             .build_zig_basename = build_zig_basename,
-            .directory = .{ .path = null, .handle = fs.cwd() },
+            .directory = .{ .path = null, .handle = Io.Dir.cwd() },
             .cleanup_build_dir = null,
         };
     }
 
@@ -7280,8 +7280,8 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
     var dirname: []const u8 = cwd_path;
     while (true) {
         const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename });
-        if (fs.cwd().access(joined_path, .{})) |_| {
-            const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+        if (Io.Dir.cwd().access(joined_path, .{})) |_| {
+            const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| {
                 fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) });
             };
             return .{
@@ -7443,7 +7443,7 @@ const Templates = struct {
     }
 };
 fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
-    const f = try fs.cwd().createFile(file_name, .{ .exclusive = true });
+    const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true });
     defer f.close(io);
     var buf: [4096]u8 = undefined;
     var fw = f.writer(&buf);
@@ -7591,7 +7591,7 @@ fn addLibDirectoryWarn2(
     ignore_not_found: bool,
 ) void {
     lib_directories.appendAssumeCapacity(.{
-        .handle = fs.cwd().openDir(path, .{}) catch |err| {
+        .handle = Io.Dir.cwd().openDir(path, .{}) catch |err| {
             if (err == error.FileNotFound and ignore_not_found) return;
             warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) });
             return;
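The call-shape change applied at every site above, shown as a minimal sketch (not part of the diff): the current-working-directory handle now comes from std.Io.Dir.cwd() instead of std.fs.cwd(), and operations such as createFile and close take the Io instance explicitly. The helper name touchInCwd and its Io parameter are illustrative assumptions, mirroring writeSimpleTemplateFile in the hunk above.

    const std = @import("std");
    const Io = std.Io;

    // Illustrative helper (not from the commit): create a file relative to the
    // cwd with the std.Io.Dir API, threading `io` through createFile and close
    // exactly as the updated call sites in this patch do.
    fn touchInCwd(io: Io, file_name: []const u8) !void {
        const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true });
        defer f.close(io);
    }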
