Diffstat (limited to 'lib/std')
80 files changed, 5052 insertions, 4293 deletions
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index e68c3e7892..5e8412cfcf 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -123,7 +123,7 @@ pub const HexDigest = [hex_digest_len]u8; /// This is currently just an arbitrary non-empty string that can't match another manifest line. const manifest_header = "0"; -const manifest_file_size_max = 100 * 1024 * 1024; +pub const manifest_file_size_max = 100 * 1024 * 1024; /// The type used for hashing file contents. Currently, this is SipHash128(1, 3), because it /// provides enough collision resistance for the Manifest use cases, while being one of our diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index bb4a960a73..2897b29969 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -124,7 +124,7 @@ pub fn init( .coverage_files = .empty, .coverage_mutex = .init, .queue_mutex = .init, - .queue_cond = .{}, + .queue_cond = .init, .msg_queue = .empty, }; } diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 60a8741f0b..c57c7750be 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -188,9 +188,6 @@ force_undefined_symbols: std.StringHashMap(void), /// Overrides the default stack size stack_size: ?u64 = null, -/// Deprecated; prefer using `lto`. -want_lto: ?bool = null, - use_llvm: ?bool, use_lld: ?bool, use_new_linker: ?bool, @@ -540,7 +537,7 @@ pub fn installHeadersDirectory( /// When a module links with this artifact, all headers marked for installation are added to that /// module's include search path. pub fn installConfigHeader(cs: *Compile, config_header: *Step.ConfigHeader) void { - cs.installHeader(config_header.getOutput(), config_header.include_path); + cs.installHeader(config_header.getOutputFile(), config_header.include_path); } /// Forwards all headers marked for installation from `lib` to this artifact. @@ -683,18 +680,6 @@ pub fn producesImplib(compile: *Compile) bool { return compile.isDll(); } -/// Deprecated; use `compile.root_module.link_libc = true` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkLibC(compile: *Compile) void { - compile.root_module.link_libc = true; -} - -/// Deprecated; use `compile.root_module.link_libcpp = true` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkLibCpp(compile: *Compile) void { - compile.root_module.link_libcpp = true; -} - const PkgConfigResult = struct { cflags: []const []const u8, libs: []const []const u8, @@ -808,46 +793,6 @@ fn runPkgConfig(compile: *Compile, lib_name: []const u8) !PkgConfigResult { }; } -/// Deprecated; use `compile.root_module.linkSystemLibrary(name, .{})` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkSystemLibrary(compile: *Compile, name: []const u8) void { - return compile.root_module.linkSystemLibrary(name, .{}); -} - -/// Deprecated; use `compile.root_module.linkSystemLibrary(name, options)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkSystemLibrary2( - compile: *Compile, - name: []const u8, - options: Module.LinkSystemLibraryOptions, -) void { - return compile.root_module.linkSystemLibrary(name, options); -} - -/// Deprecated; use `c.root_module.linkFramework(name, .{})` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkFramework(c: *Compile, name: []const u8) void { - c.root_module.linkFramework(name, .{}); -} - -/// Deprecated; use `compile.root_module.addCSourceFiles(options)` instead. -/// To be removed after 0.15.0 is tagged. 
-pub fn addCSourceFiles(compile: *Compile, options: Module.AddCSourceFilesOptions) void { - compile.root_module.addCSourceFiles(options); -} - -/// Deprecated; use `compile.root_module.addCSourceFile(source)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addCSourceFile(compile: *Compile, source: Module.CSourceFile) void { - compile.root_module.addCSourceFile(source); -} - -/// Deprecated; use `compile.root_module.addWin32ResourceFile(source)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addWin32ResourceFile(compile: *Compile, source: Module.RcSourceFile) void { - compile.root_module.addWin32ResourceFile(source); -} - pub fn setVerboseLink(compile: *Compile, value: bool) void { compile.verbose_link = value; } @@ -929,84 +874,6 @@ pub fn getEmittedLlvmBc(compile: *Compile) LazyPath { return compile.getEmittedFileGeneric(&compile.generated_llvm_bc); } -/// Deprecated; use `compile.root_module.addAssemblyFile(source)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addAssemblyFile(compile: *Compile, source: LazyPath) void { - compile.root_module.addAssemblyFile(source); -} - -/// Deprecated; use `compile.root_module.addObjectFile(source)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addObjectFile(compile: *Compile, source: LazyPath) void { - compile.root_module.addObjectFile(source); -} - -/// Deprecated; use `compile.root_module.addObject(object)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addObject(compile: *Compile, object: *Compile) void { - compile.root_module.addObject(object); -} - -/// Deprecated; use `compile.root_module.linkLibrary(library)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn linkLibrary(compile: *Compile, library: *Compile) void { - compile.root_module.linkLibrary(library); -} - -/// Deprecated; use `compile.root_module.addAfterIncludePath(lazy_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addAfterIncludePath(compile: *Compile, lazy_path: LazyPath) void { - compile.root_module.addAfterIncludePath(lazy_path); -} - -/// Deprecated; use `compile.root_module.addSystemIncludePath(lazy_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addSystemIncludePath(compile: *Compile, lazy_path: LazyPath) void { - compile.root_module.addSystemIncludePath(lazy_path); -} - -/// Deprecated; use `compile.root_module.addIncludePath(lazy_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addIncludePath(compile: *Compile, lazy_path: LazyPath) void { - compile.root_module.addIncludePath(lazy_path); -} - -/// Deprecated; use `compile.root_module.addConfigHeader(config_header)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addConfigHeader(compile: *Compile, config_header: *Step.ConfigHeader) void { - compile.root_module.addConfigHeader(config_header); -} - -/// Deprecated; use `compile.root_module.addEmbedPath(lazy_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addEmbedPath(compile: *Compile, lazy_path: LazyPath) void { - compile.root_module.addEmbedPath(lazy_path); -} - -/// Deprecated; use `compile.root_module.addLibraryPath(directory_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addLibraryPath(compile: *Compile, directory_path: LazyPath) void { - compile.root_module.addLibraryPath(directory_path); -} - -/// Deprecated; use `compile.root_module.addRPath(directory_path)` instead. -/// To be removed after 0.15.0 is tagged. 
-pub fn addRPath(compile: *Compile, directory_path: LazyPath) void { - compile.root_module.addRPath(directory_path); -} - -/// Deprecated; use `compile.root_module.addSystemFrameworkPath(directory_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addSystemFrameworkPath(compile: *Compile, directory_path: LazyPath) void { - compile.root_module.addSystemFrameworkPath(directory_path); -} - -/// Deprecated; use `compile.root_module.addFrameworkPath(directory_path)` instead. -/// To be removed after 0.15.0 is tagged. -pub fn addFrameworkPath(compile: *Compile, directory_path: LazyPath) void { - compile.root_module.addFrameworkPath(directory_path); -} - pub fn setExecCmd(compile: *Compile, args: []const ?[]const u8) void { const b = compile.step.owner; assert(compile.kind == .@"test"); @@ -1763,7 +1630,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { .thin => "-flto=thin", .none => "-fno-lto", }); - } else try addFlag(&zig_args, "lto", compile.want_lto); + } try addFlag(&zig_args, "sanitize-coverage-trace-pc-guard", compile.sanitize_coverage_trace_pc_guard); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 0c42449697..df2419764d 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -124,9 +124,6 @@ pub fn getOutputFile(ch: *ConfigHeader) std.Build.LazyPath { return ch.getOutputDir().path(ch.step.owner, ch.include_path); } -/// Deprecated; use `getOutputFile`. -pub const getOutput = getOutputFile; - fn addValueInner(config_header: *ConfigHeader, name: []const u8, comptime T: type, value: T) !void { switch (@typeInfo(T)) { .null => { diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 1cc798bf30..28c09e1faf 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -88,9 +88,6 @@ skip_foreign_checks: bool, /// external executor (such as qemu) but not fail if the executor is unavailable. failing_to_execute_foreign_is_an_error: bool, -/// Deprecated in favor of `stdio_limit`. -max_stdio_size: usize, - /// If stderr or stdout exceeds this amount, the child process is killed and /// the step fails. stdio_limit: std.Io.Limit, @@ -223,7 +220,6 @@ pub fn create(owner: *std.Build, name: []const u8) *Run { .rename_step_with_output_arg = true, .skip_foreign_checks = false, .failing_to_execute_foreign_is_an_error = true, - .max_stdio_size = 10 * 1024 * 1024, .stdio_limit = .unlimited, .captured_stdout = null, .captured_stderr = null, @@ -2217,7 +2213,6 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult { var stdout_bytes: ?[]const u8 = null; var stderr_bytes: ?[]const u8 = null; - run.stdio_limit = run.stdio_limit.min(.limited(run.max_stdio_size)); if (child.stdout) |stdout| { if (child.stderr) |stderr| { var poller = std.Io.poll(arena, enum { stdout, stderr }, .{ diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index 3213a9a24d..ca01376e02 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -366,7 +366,7 @@ const Os = switch (builtin.os.tag) { var attr = windows.OBJECT_ATTRIBUTES{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else root_fd, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
+ .Attributes = .{}, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, @@ -375,14 +375,23 @@ const Os = switch (builtin.os.tag) { switch (windows.ntdll.NtCreateFile( &dir_handle, - windows.SYNCHRONIZE | windows.GENERIC_READ | windows.FILE_LIST_DIRECTORY, + .{ + .SPECIFIC = .{ .FILE_DIRECTORY = .{ + .LIST = true, + } }, + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .READ = true }, + }, &attr, &io, null, - 0, - windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE, - windows.FILE_OPEN, - windows.FILE_DIRECTORY_FILE | windows.FILE_OPEN_FOR_BACKUP_INTENT, + .{}, + .VALID_FLAGS, + .OPEN, + .{ + .DIRECTORY_FILE = true, + .OPEN_FOR_BACKUP_INTENT = true, + }, null, 0, )) { @@ -437,13 +446,13 @@ const Os = switch (builtin.os.tag) { fn getFileId(handle: windows.HANDLE) !FileId { var file_id: FileId = undefined; var io_status: windows.IO_STATUS_BLOCK = undefined; - var volume_info: windows.FILE_FS_VOLUME_INFORMATION = undefined; + var volume_info: windows.FILE.FS_VOLUME_INFORMATION = undefined; switch (windows.ntdll.NtQueryVolumeInformationFile( handle, &io_status, &volume_info, - @sizeOf(windows.FILE_FS_VOLUME_INFORMATION), - .FileFsVolumeInformation, + @sizeOf(windows.FILE.FS_VOLUME_INFORMATION), + .Volume, )) { .SUCCESS => {}, // Buffer overflow here indicates that there is more information available than was able to be stored in the buffer @@ -453,13 +462,13 @@ const Os = switch (builtin.os.tag) { else => |rc| return windows.unexpectedStatus(rc), } file_id.volumeSerialNumber = volume_info.VolumeSerialNumber; - var internal_info: windows.FILE_INTERNAL_INFORMATION = undefined; + var internal_info: windows.FILE.INTERNAL_INFORMATION = undefined; switch (windows.ntdll.NtQueryInformationFile( handle, &io_status, &internal_info, - @sizeOf(windows.FILE_INTERNAL_INFORMATION), - .FileInternalInformation, + @sizeOf(windows.FILE.INTERNAL_INFORMATION), + .Internal, )) { .SUCCESS => {}, else => |rc| return windows.unexpectedStatus(rc), diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 4d649e6f9b..2c865a8889 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -122,8 +122,8 @@ pub fn init(opts: Options) WebServer { .update_id = .init(0), .runner_request_mutex = .init, - .runner_request_ready_cond = .{}, - .runner_request_empty_cond = .{}, + .runner_request_ready_cond = .init, + .runner_request_empty_cond = .init, .runner_request = null, }; } diff --git a/lib/std/Io.zig b/lib/std/Io.zig index ffd7837baf..367871787c 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -650,17 +650,17 @@ pub const VTable = struct { groupWait: *const fn (?*anyopaque, *Group, token: *anyopaque) void, groupCancel: *const fn (?*anyopaque, *Group, token: *anyopaque) void, + recancel: *const fn (?*anyopaque) void, + swapCancelProtection: *const fn (?*anyopaque, new: CancelProtection) CancelProtection, + checkCancel: *const fn (?*anyopaque) Cancelable!void, + /// Blocks until one of the futures from the list has a result ready, such /// that awaiting it will not block. Returns that index. 
select: *const fn (?*anyopaque, futures: []const *AnyFuture) Cancelable!usize, - mutexLock: *const fn (?*anyopaque, prev_state: Mutex.State, mutex: *Mutex) Cancelable!void, - mutexLockUncancelable: *const fn (?*anyopaque, prev_state: Mutex.State, mutex: *Mutex) void, - mutexUnlock: *const fn (?*anyopaque, prev_state: Mutex.State, mutex: *Mutex) void, - - conditionWait: *const fn (?*anyopaque, cond: *Condition, mutex: *Mutex) Cancelable!void, - conditionWaitUncancelable: *const fn (?*anyopaque, cond: *Condition, mutex: *Mutex) void, - conditionWake: *const fn (?*anyopaque, cond: *Condition, wake: Condition.Wake) void, + futexWait: *const fn (?*anyopaque, ptr: *const u32, expected: u32, Timeout) Cancelable!void, + futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, + futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, dirMake: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.Mode) Dir.MakeError!void, dirMakePath: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.Mode) Dir.MakeError!void, @@ -701,7 +701,7 @@ pub const VTable = struct { netClose: *const fn (?*anyopaque, handle: net.Socket.Handle) void, netInterfaceNameResolve: *const fn (?*anyopaque, *const net.Interface.Name) net.Interface.Name.ResolveError!net.Interface, netInterfaceName: *const fn (?*anyopaque, net.Interface) net.Interface.NameError!net.Interface.Name, - netLookup: *const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) void, + netLookup: *const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) net.HostName.LookupError!void, }; pub const Cancelable = error{ @@ -986,7 +986,14 @@ pub fn Future(Result: type) type { any_future: ?*AnyFuture, result: Result, - /// Equivalent to `await` but places a cancellation request. + /// Equivalent to `await` but places a cancellation request. This causes the task to receive + /// `error.Canceled` from its next "cancelation point" (if any). A cancelation point is a + /// call to a function in `Io` which can return `error.Canceled`. + /// + /// After cancelation of a task is requested, only the next cancelation point in that task + /// will return `error.Canceled`: future points will not re-signal the cancelation. As such, + /// it is usually a bug to ignore `error.Canceled`. However, to defer handling cancelation + /// requests, see also `recancel` and `CancelProtection`. /// /// Idempotent. Not threadsafe. pub fn cancel(f: *@This(), io: Io) Result { @@ -1083,6 +1090,8 @@ pub const Group = struct { /// Equivalent to `wait` but immediately requests cancellation on all /// members of the group. /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + /// /// Idempotent. Not threadsafe. pub fn cancel(g: *Group, io: Io) void { const token = g.token orelse return; @@ -1091,6 +1100,61 @@ pub const Group = struct { } }; +/// Asserts that `error.Canceled` was returned from a prior cancelation point, and "re-arms" the +/// cancelation request, so that `error.Canceled` will be returned again from the next cancelation +/// point. +/// +/// For a description of cancelation and cancelation points, see `Future.cancel`. +pub fn recancel(io: Io) void { + io.vtable.recancel(io.userdata); +} + +/// In rare cases, it is desirable to completely block cancelation notification, so that a region +/// of code can run uninterrupted before `error.Canceled` is potentially observed. 
Therefore, every +/// task has a "cancel protection" state which indicates whether or not `Io` functions can introduce +/// cancelation points. +/// +/// To modify a task's cancel protection state, see `swapCancelProtection`. +/// +/// For a description of cancelation and cancelation points, see `Future.cancel`. +pub const CancelProtection = enum { + /// Any call to an `Io` function with `error.Canceled` in its error set is a cancelation point. + /// + /// This is the default state, which all tasks are created in. + unblocked, + /// No `Io` function introduces a cancelation point (`error.Canceled` will never be returned). + blocked, +}; +/// Updates the current task's cancel protection state (see `CancelProtection`). +/// +/// The typical usage for this function is to protect a block of code from cancelation: +/// ``` +/// const old_cancel_protect = io.swapCancelProtection(.blocked); +/// defer _ = io.swapCancelProtection(old_cancel_protect); +/// doSomeWork() catch |err| switch (err) { +/// error.Canceled => unreachable, +/// }; +/// ``` +/// +/// For a description of cancelation and cancelation points, see `Future.cancel`. +pub fn swapCancelProtection(io: Io, new: CancelProtection) CancelProtection { + return io.vtable.swapCancelProtection(io.userdata, new); +} + +/// This function acts as a pure cancelation point (subject to protection; see `CancelProtection`) +/// and does nothing else. In other words, it returns `error.Canceled` if there is an outstanding +/// non-blocked cancelation request, but otherwise is a no-op. +/// +/// It is rarely necessary to call this function. The primary use case is in long-running CPU-bound +/// tasks which may need to respond to cancelation before completing. Short tasks, or those which +/// perform other `Io` operations (and hence have other cancelation points), will typically already +/// respond quickly to cancelation requests. +/// +/// For a description of cancelation and cancelation points, see `Future.cancel`. +pub fn checkCancel(io: Io) Cancelable!void { + return io.vtable.checkCancel(io.userdata); +} + pub fn Select(comptime U: type) type { return struct { io: Io, @@ -1144,7 +1208,9 @@ pub fn Select(comptime U: type) type { const args_casted: *const Args = @ptrCast(@alignCast(context)); const unerased_select: *S = @fieldParentPtr("group", group); const elem = @unionInit(U, @tagName(field), @call(.auto, function, args_casted.*)); - unerased_select.queue.putOneUncancelable(unerased_select.io, elem); + unerased_select.queue.putOneUncancelable(unerased_select.io, elem) catch |err| switch (err) { + error.Closed => unreachable, + }; } }; _ = @atomicRmw(usize, &s.outstanding, .Add, 1, .monotonic); @@ -1158,12 +1224,17 @@ pub fn Select(comptime U: type) type { /// Not threadsafe. pub fn wait(s: *S) Cancelable!U { s.outstanding -= 1; - return s.queue.getOne(s.io); + return s.queue.getOne(s.io) catch |err| switch (err) { + error.Canceled => |e| return e, + error.Closed => unreachable, + }; } /// Equivalent to `wait` but requests cancellation on all remaining /// tasks owned by the select. /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + /// /// It is illegal to call `wait` after this. /// /// Idempotent. Not threadsafe. @@ -1174,104 +1245,340 @@ pub fn Select(comptime U: type) type { }; } +/// Atomically checks if the value at `ptr` equals `expected`, and if so, blocks until either: +/// +/// * a matching (same `ptr` argument) `futexWake` call occurs, or +/// * a spurious ("random") wakeup occurs. 
+/// +/// Typically, `futexWake` should be called immediately after updating the value at `ptr.*`, to +/// unblock tasks using `futexWait` to wait for the value to change from what it previously was. +/// +/// The caller is responsible for identifying spurious wakeups if necessary, typically by checking +/// the value at `ptr.*`. +/// +/// Asserts that `T` is 4 bytes in length and has a well-defined layout with no padding bits. +pub fn futexWait(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, expected: T) Cancelable!void { + return futexWaitTimeout(io, T, ptr, expected, .none); +} +/// Same as `futexWait`, except also unblocks if `timeout` expires. As with `futexWait`, spurious +/// wakeups are possible. It remains the caller's responsibility to differentiate between these +/// three possible wake-up reasons if necessary. +pub fn futexWaitTimeout(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, expected: T, timeout: Timeout) Cancelable!void { + comptime assert(@sizeOf(T) == 4); + const expected_raw: *align(1) const u32 = @ptrCast(&expected); + return io.vtable.futexWait(io.userdata, @ptrCast(ptr), expected_raw.*, timeout); +} +/// Same as `futexWait`, except does not introduce a cancelation point. +/// +/// For a description of cancelation and cancelation points, see `Future.cancel`. +pub fn futexWaitUncancelable(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, expected: T) void { + comptime assert(@sizeOf(T) == @sizeOf(u32)); + const expected_raw: *align(1) const u32 = @ptrCast(&expected); + io.vtable.futexWaitUncancelable(io.userdata, @ptrCast(ptr), expected_raw.*); +} +/// Unblocks pending futex waits on `ptr`, up to a limit of `max_waiters` calls. +pub fn futexWake(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, max_waiters: u32) void { + comptime assert(@sizeOf(T) == @sizeOf(u32)); + if (max_waiters == 0) return; + return io.vtable.futexWake(io.userdata, @ptrCast(ptr), max_waiters); +} + pub const Mutex = struct { - state: State, + state: std.atomic.Value(State), - pub const State = enum(usize) { - locked_once = 0b00, - unlocked = 0b01, - contended = 0b10, - /// contended - _, + pub const init: Mutex = .{ .state = .init(.unlocked) }; - pub fn isUnlocked(state: State) bool { - return @intFromEnum(state) & @intFromEnum(State.unlocked) == @intFromEnum(State.unlocked); - } + const State = enum(u32) { + unlocked, + locked_once, + contended, }; - pub const init: Mutex = .{ .state = .unlocked }; - - pub fn tryLock(mutex: *Mutex) bool { - const prev_state: State = @enumFromInt(@atomicRmw( - usize, - @as(*usize, @ptrCast(&mutex.state)), - .And, - ~@intFromEnum(State.unlocked), + pub fn tryLock(m: *Mutex) bool { + switch (m.state.cmpxchgWeak( + .unlocked, + .locked_once, .acquire, - )); - return prev_state.isUnlocked(); + .monotonic, + ) orelse return true) { + .unlocked => unreachable, + .locked_once, .contended => return false, + } } - pub fn lock(mutex: *Mutex, io: std.Io) Cancelable!void { - const prev_state: State = @enumFromInt(@atomicRmw( - usize, - @as(*usize, @ptrCast(&mutex.state)), - .And, - ~@intFromEnum(State.unlocked), + pub fn lock(m: *Mutex, io: Io) Cancelable!void { + const initial_state = m.state.cmpxchgWeak( + .unlocked, + .locked_once, .acquire, - )); - if (prev_state.isUnlocked()) { + .monotonic, + ) orelse { @branchHint(.likely); return; + }; + if (initial_state == .contended) { + try io.futexWait(State, &m.state.raw, .contended); + } + while (m.state.swap(.contended, .acquire) != .unlocked) { + try io.futexWait(State, 
&m.state.raw, .contended); } - return io.vtable.mutexLock(io.userdata, prev_state, mutex); } - /// Same as `lock` but cannot be canceled. - pub fn lockUncancelable(mutex: *Mutex, io: std.Io) void { - const prev_state: State = @enumFromInt(@atomicRmw( - usize, - @as(*usize, @ptrCast(&mutex.state)), - .And, - ~@intFromEnum(State.unlocked), + /// Same as `lock`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn lockUncancelable(m: *Mutex, io: Io) void { + const initial_state = m.state.cmpxchgWeak( + .unlocked, + .locked_once, .acquire, - )); - if (prev_state.isUnlocked()) { + .monotonic, + ) orelse { @branchHint(.likely); return; + }; + if (initial_state == .contended) { + io.futexWaitUncancelable(State, &m.state.raw, .contended); + } + while (m.state.swap(.contended, .acquire) != .unlocked) { + io.futexWaitUncancelable(State, &m.state.raw, .contended); } - return io.vtable.mutexLockUncancelable(io.userdata, prev_state, mutex); } - pub fn unlock(mutex: *Mutex, io: std.Io) void { - const prev_state = @cmpxchgWeak(State, &mutex.state, .locked_once, .unlocked, .release, .acquire) orelse { - @branchHint(.likely); - return; - }; - assert(prev_state != .unlocked); // mutex not locked - return io.vtable.mutexUnlock(io.userdata, prev_state, mutex); + pub fn unlock(m: *Mutex, io: Io) void { + switch (m.state.swap(.unlocked, .release)) { + .unlocked => unreachable, + .locked_once => {}, + .contended => { + @branchHint(.unlikely); + io.futexWake(State, &m.state.raw, 1); + }, + } } }; pub const Condition = struct { - state: u64 = 0, + state: std.atomic.Value(State), + /// Incremented whenever the condition is signaled + epoch: std.atomic.Value(u32), + + const State = packed struct(u32) { + waiters: u16, + signals: u16, + }; + + pub const init: Condition = .{ + .state = .init(.{ .waiters = 0, .signals = 0 }), + .epoch = .init(0), + }; pub fn wait(cond: *Condition, io: Io, mutex: *Mutex) Cancelable!void { - return io.vtable.conditionWait(io.userdata, cond, mutex); + try waitInner(cond, io, mutex, false); } + /// Same as `wait`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. pub fn waitUncancelable(cond: *Condition, io: Io, mutex: *Mutex) void { - return io.vtable.conditionWaitUncancelable(io.userdata, cond, mutex); + waitInner(cond, io, mutex, true) catch |err| switch (err) { + error.Canceled => unreachable, + }; + } + + fn waitInner(cond: *Condition, io: Io, mutex: *Mutex, uncancelable: bool) Cancelable!void { + var epoch = cond.epoch.load(.acquire); // `.acquire` to ensure ordered before state load + + { + const prev_state = cond.state.fetchAdd(.{ .waiters = 1, .signals = 0 }, .monotonic); + assert(prev_state.waiters < math.maxInt(u16)); // overflow caused by too many waiters + } + + mutex.unlock(io); + defer mutex.lockUncancelable(io); + + while (true) { + const result = if (uncancelable) + io.futexWaitUncancelable(u32, &cond.epoch.raw, epoch) + else + io.futexWait(u32, &cond.epoch.raw, epoch); + + epoch = cond.epoch.load(.acquire); // `.acquire` to ensure ordered before `state` laod + + // Even on error, try to consume a pending signal first. Otherwise a race might + // cause a signal to get stuck in the state with no corresponding waiter. 
+ { + var prev_state = cond.state.load(.monotonic); + while (prev_state.signals > 0) { + prev_state = cond.state.cmpxchgWeak(prev_state, .{ + .waiters = prev_state.waiters - 1, + .signals = prev_state.signals - 1, + }, .acquire, .monotonic) orelse { + // We successfully consumed a signal. + return; + }; + } + } + + // There are no more signals available; this was a spurious wakeup or an error. If it + // was an error, we will remove ourselves as a waiter and return that error. Otherwise, + // we'll loop back to the futex wait. + result catch |err| { + const prev_state = cond.state.fetchSub(.{ .waiters = 1, .signals = 0 }, .monotonic); + assert(prev_state.waiters > 0); // underflow caused by illegal state + return err; + }; + } } pub fn signal(cond: *Condition, io: Io) void { - io.vtable.conditionWake(io.userdata, cond, .one); + var prev_state = cond.state.load(.monotonic); + while (prev_state.waiters > prev_state.signals) { + @branchHint(.unlikely); + prev_state = cond.state.cmpxchgWeak(prev_state, .{ + .waiters = prev_state.waiters, + .signals = prev_state.signals + 1, + }, .release, .monotonic) orelse { + // Update the epoch to tell the waiting threads that there are new signals for them. + // Note that a waiting thread could miss a take if *exactly* (1<<32)-1 wakes happen + // between it observing the epoch and sleeping on it, but this is extraordinarily + // unlikely due to the precise number of calls required. + _ = cond.epoch.fetchAdd(1, .release); // `.release` to ensure ordered after `state` update + io.futexWake(u32, &cond.epoch.raw, 1); + return; + }; + } } pub fn broadcast(cond: *Condition, io: Io) void { - io.vtable.conditionWake(io.userdata, cond, .all); + var prev_state = cond.state.load(.monotonic); + while (prev_state.waiters > prev_state.signals) { + @branchHint(.unlikely); + prev_state = cond.state.cmpxchgWeak(prev_state, .{ + .waiters = prev_state.waiters, + .signals = prev_state.waiters, + }, .release, .monotonic) orelse { + // Update the epoch to tell the waiting threads that there are new signals for them. + // Note that a waiting thread could miss a take if *exactly* (1<<32)-1 wakes happen + // between it observing the epoch and sleeping on it, but this is extraordinarily + // unlikely due to the precise number of calls required. + _ = cond.epoch.fetchAdd(1, .release); // `.release` to ensure ordered after `state` update + io.futexWake(u32, &cond.epoch.raw, prev_state.waiters - prev_state.signals); + return; + }; + } } +}; - pub const Wake = enum { - /// Wake up only one thread. - one, - /// Wake up all threads. - all, - }; +/// Logical boolean flag which can be set and unset and supports a "wait until set" operation. +pub const Event = enum(u32) { + unset, + waiting, + is_set, + + /// Returns whether the logical boolean is `true`. + pub fn isSet(event: *const Event) bool { + return switch (@atomicLoad(Event, event, .acquire)) { + .unset, .waiting => false, + .is_set => true, + }; + } + + /// Blocks until the logical boolean is `true`. + pub fn wait(event: *Event, io: Io) Io.Cancelable!void { + if (@cmpxchgStrong(Event, event, .unset, .waiting, .acquire, .acquire)) |prev| switch (prev) { + .unset => unreachable, + .waiting => {}, + .is_set => return, + }; + errdefer { + // Ideally we would restore the event back to `.unset` instead of `.waiting`, but there + // might be other threads waiting on the event. 
In theory we could track the *number* of + // waiting threads in the unused bits of the `Event`, but that has its own problem: the + // waiters would wake up when a *new waiter* was added. So it's easiest to just leave + // the state at `.waiting`---at worst it causes one redundant call to `futexWake`. + } + while (true) { + try io.futexWait(Event, event, .waiting); + switch (@atomicLoad(Event, event, .acquire)) { + .unset => unreachable, // `reset` called before pending `wait` returned + .waiting => continue, + .is_set => return, + } + } + } + + /// Same as `wait`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn waitUncancelable(event: *Event, io: Io) void { + if (@cmpxchgStrong(Event, event, .unset, .waiting, .acquire, .acquire)) |prev| switch (prev) { + .unset => unreachable, + .waiting => {}, + .is_set => return, + }; + while (true) { + io.futexWaitUncancelable(Event, event, .waiting); + switch (@atomicLoad(Event, event, .acquire)) { + .unset => unreachable, // `reset` called before pending `wait` returned + .waiting => continue, + .is_set => return, + } + } + } + + /// Blocks the calling thread until either the logical boolean is set, the timeout expires, or a + /// spurious wakeup occurs. If the timeout expires or a spurious wakeup occurs, `error.Timeout` + /// is returned. + pub fn waitTimeout(event: *Event, io: Io, timeout: Timeout) (error{Timeout} || Cancelable)!void { + if (@cmpxchgStrong(Event, event, .unset, .waiting, .acquire, .acquire)) |prev| switch (prev) { + .unset => unreachable, + .waiting => assert(!builtin.single_threaded), // invalid state + .is_set => return, + }; + errdefer { + // Ideally we would restore the event back to `.unset` instead of `.waiting`, but there + // might be other threads waiting on the event. In theory we could track the *number* of + // waiting threads in the unused bits of the `Event`, but that has its own problem: the + // waiters would wake up when a *new waiter* was added. So it's easiest to just leave + // the state at `.waiting`---at worst it causes one redundant call to `futexWake`. + } + io.futexWaitTimeout(Event, event, .waiting, timeout); + switch (@atomicLoad(Event, event, .acquire)) { + .unset => unreachable, // `reset` called before pending `wait` returned + .waiting => return error.Timeout, + .is_set => return, + } + } + + /// Sets the logical boolean to true, and hence unblocks any pending calls to `wait`. The + /// logical boolean remains true until `reset` is called, so future calls to `set` have no + /// semantic effect. + /// + /// Any memory accesses prior to a `set` call are "released", so that if this `set` call causes + /// `isSet` to return `true` or a wait to finish, those tasks will be able to observe those + /// memory accesses. + pub fn set(e: *Event, io: Io) void { + switch (@atomicRmw(Event, e, .Xchg, .is_set, .release)) { + .unset, .is_set => {}, + .waiting => io.futexWake(Event, e, std.math.maxInt(u32)), + } + } + + /// Sets the logical boolean to false. + /// + /// Assumes that there is no pending call to `wait` or `waitUncancelable`. + /// + /// However, concurrent calls to `isSet`, `set`, and `reset` are allowed. + pub fn reset(e: *Event) void { + @atomicStore(Event, e, .unset, .monotonic); + } }; +pub const QueueClosedError = error{Closed}; + pub const TypeErasedQueue = struct { mutex: Mutex, + closed: bool, /// Ring buffer. This data is logically *after* queued getters. 
buffer: []u8, @@ -1283,12 +1590,14 @@ pub const TypeErasedQueue = struct { const Put = struct { remaining: []const u8, + needed: usize, condition: Condition, node: std.DoublyLinkedList.Node, }; const Get = struct { remaining: []u8, + needed: usize, condition: Condition, node: std.DoublyLinkedList.Node, }; @@ -1296,6 +1605,7 @@ pub const TypeErasedQueue = struct { pub fn init(buffer: []u8) TypeErasedQueue { return .{ .mutex = .init, + .closed = false, .buffer = buffer, .start = 0, .len = 0, @@ -1304,7 +1614,27 @@ pub const TypeErasedQueue = struct { }; } - pub fn put(q: *TypeErasedQueue, io: Io, elements: []const u8, min: usize) Cancelable!usize { + pub fn close(q: *TypeErasedQueue, io: Io) void { + q.mutex.lockUncancelable(io); + defer q.mutex.unlock(io); + q.closed = true; + { + var it = q.getters.first; + while (it) |node| : (it = node.next) { + const getter: *Get = @alignCast(@fieldParentPtr("node", node)); + getter.condition.signal(io); + } + } + { + var it = q.putters.first; + while (it) |node| : (it = node.next) { + const putter: *Put = @alignCast(@fieldParentPtr("node", node)); + putter.condition.signal(io); + } + } + } + + pub fn put(q: *TypeErasedQueue, io: Io, elements: []const u8, min: usize) (QueueClosedError || Cancelable)!usize { assert(elements.len >= min); if (elements.len == 0) return 0; try q.mutex.lock(io); @@ -1312,14 +1642,17 @@ pub const TypeErasedQueue = struct { return q.putLocked(io, elements, min, false); } - /// Same as `put` but cannot be canceled. - pub fn putUncancelable(q: *TypeErasedQueue, io: Io, elements: []const u8, min: usize) usize { + /// Same as `put`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn putUncancelable(q: *TypeErasedQueue, io: Io, elements: []const u8, min: usize) QueueClosedError!usize { assert(elements.len >= min); if (elements.len == 0) return 0; q.mutex.lockUncancelable(io); defer q.mutex.unlock(io); return q.putLocked(io, elements, min, true) catch |err| switch (err) { error.Canceled => unreachable, + error.Closed => |e| return e, }; } @@ -1333,49 +1666,79 @@ pub const TypeErasedQueue = struct { return if (slice.len > 0) slice else null; } - fn putLocked(q: *TypeErasedQueue, io: Io, elements: []const u8, min: usize, uncancelable: bool) Cancelable!usize { + fn putLocked(q: *TypeErasedQueue, io: Io, elements: []const u8, target: usize, uncancelable: bool) (QueueClosedError || Cancelable)!usize { + // A closed queue cannot be added to, even if there is space in the buffer. + if (q.closed) return error.Closed; + // Getters have first priority on the data, and only when the getters // queue is empty do we start populating the buffer. - var remaining = elements; + // The number of elements we add immediately, before possibly blocking. 
+ var n: usize = 0; + while (q.getters.popFirst()) |getter_node| { const getter: *Get = @alignCast(@fieldParentPtr("node", getter_node)); - const copy_len = @min(getter.remaining.len, remaining.len); + const copy_len = @min(getter.remaining.len, elements.len - n); assert(copy_len > 0); - @memcpy(getter.remaining[0..copy_len], remaining[0..copy_len]); - remaining = remaining[copy_len..]; + @memcpy(getter.remaining[0..copy_len], elements[n..][0..copy_len]); getter.remaining = getter.remaining[copy_len..]; - if (getter.remaining.len == 0) { + getter.needed -|= copy_len; + n += copy_len; + if (getter.needed == 0) { getter.condition.signal(io); - if (remaining.len > 0) continue; - } else q.getters.prepend(getter_node); - assert(remaining.len == 0); - return elements.len; + } else { + assert(n == elements.len); // we didn't have enough elements for the getter + q.getters.prepend(getter_node); + } + if (n == elements.len) return elements.len; } while (q.puttableSlice()) |slice| { - const copy_len = @min(slice.len, remaining.len); + const copy_len = @min(slice.len, elements.len - n); assert(copy_len > 0); - @memcpy(slice[0..copy_len], remaining[0..copy_len]); + @memcpy(slice[0..copy_len], elements[n..][0..copy_len]); q.len += copy_len; - remaining = remaining[copy_len..]; - if (remaining.len == 0) return elements.len; + n += copy_len; + if (n == elements.len) return elements.len; } - const total_filled = elements.len - remaining.len; - if (total_filled >= min) return total_filled; + // Don't block if we hit the target. + if (n >= target) return n; - var pending: Put = .{ .remaining = remaining, .condition = .{}, .node = .{} }; + var pending: Put = .{ + .remaining = elements[n..], + .needed = target - n, + .condition = .init, + .node = .{}, + }; q.putters.append(&pending.node); - defer if (pending.remaining.len > 0) q.putters.remove(&pending.node); - while (pending.remaining.len > 0) if (uncancelable) - pending.condition.waitUncancelable(io, &q.mutex) - else - try pending.condition.wait(io, &q.mutex); - return elements.len; + defer if (pending.needed > 0) q.putters.remove(&pending.node); + + while (pending.needed > 0 and !q.closed) { + if (uncancelable) { + pending.condition.waitUncancelable(io, &q.mutex); + continue; + } + pending.condition.wait(io, &q.mutex) catch |err| switch (err) { + error.Canceled => if (pending.remaining.len == elements.len) { + // Canceled while waiting, and appended no elements. + return error.Canceled; + } else { + // Canceled while waiting, but appended some elements, so report those first. + io.recancel(); + return elements.len - pending.remaining.len; + }, + }; + } + if (pending.remaining.len == elements.len) { + // The queue was closed while we were waiting. We appended no elements. + assert(q.closed); + return error.Closed; + } + return elements.len - pending.remaining.len; } - pub fn get(q: *@This(), io: Io, buffer: []u8, min: usize) Cancelable!usize { + pub fn get(q: *TypeErasedQueue, io: Io, buffer: []u8, min: usize) (QueueClosedError || Cancelable)!usize { assert(buffer.len >= min); if (buffer.len == 0) return 0; try q.mutex.lock(io); @@ -1383,13 +1746,17 @@ pub const TypeErasedQueue = struct { return q.getLocked(io, buffer, min, false); } - pub fn getUncancelable(q: *@This(), io: Io, buffer: []u8, min: usize) usize { + /// Same as `get`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. 
+ pub fn getUncancelable(q: *TypeErasedQueue, io: Io, buffer: []u8, min: usize) QueueClosedError!usize { assert(buffer.len >= min); if (buffer.len == 0) return 0; q.mutex.lockUncancelable(io); defer q.mutex.unlock(io); return q.getLocked(io, buffer, min, true) catch |err| switch (err) { error.Canceled => unreachable, + error.Closed => |e| return e, }; } @@ -1399,21 +1766,23 @@ pub const TypeErasedQueue = struct { return if (slice.len > 0) slice else null; } - fn getLocked(q: *@This(), io: Io, buffer: []u8, min: usize, uncancelable: bool) Cancelable!usize { + fn getLocked(q: *TypeErasedQueue, io: Io, buffer: []u8, target: usize, uncancelable: bool) (QueueClosedError || Cancelable)!usize { // The ring buffer gets first priority, then data should come from any // queued putters, then finally the ring buffer should be filled with // data from putters so they can be resumed. - var remaining = buffer; + // The number of elements we received immediately, before possibly blocking. + var n: usize = 0; + while (q.gettableSlice()) |slice| { - const copy_len = @min(slice.len, remaining.len); + const copy_len = @min(slice.len, buffer.len - n); assert(copy_len > 0); - @memcpy(remaining[0..copy_len], slice[0..copy_len]); + @memcpy(buffer[n..][0..copy_len], slice[0..copy_len]); q.start += copy_len; if (q.buffer.len - q.start == 0) q.start = 0; q.len -= copy_len; - remaining = remaining[copy_len..]; - if (remaining.len == 0) { + n += copy_len; + if (n == buffer.len) { q.fillRingBufferFromPutters(io); return buffer.len; } @@ -1422,33 +1791,64 @@ pub const TypeErasedQueue = struct { // Copy directly from putters into buffer. while (q.putters.popFirst()) |putter_node| { const putter: *Put = @alignCast(@fieldParentPtr("node", putter_node)); - const copy_len = @min(putter.remaining.len, remaining.len); + const copy_len = @min(putter.remaining.len, buffer.len - n); assert(copy_len > 0); - @memcpy(remaining[0..copy_len], putter.remaining[0..copy_len]); + @memcpy(buffer[n..][0..copy_len], putter.remaining[0..copy_len]); putter.remaining = putter.remaining[copy_len..]; - remaining = remaining[copy_len..]; - if (putter.remaining.len == 0) { + putter.needed -|= copy_len; + n += copy_len; + if (putter.needed == 0) { putter.condition.signal(io); - if (remaining.len > 0) continue; - } else q.putters.prepend(putter_node); - assert(remaining.len == 0); - q.fillRingBufferFromPutters(io); - return buffer.len; + } else { + assert(n == buffer.len); // we didn't have enough space for the putter + q.putters.prepend(putter_node); + } + if (n == buffer.len) { + q.fillRingBufferFromPutters(io); + return buffer.len; + } } - // Both ring buffer and putters queue is empty. - const total_filled = buffer.len - remaining.len; - if (total_filled >= min) return total_filled; + // No need to call `fillRingBufferFromPutters` from this point onwards, + // because we emptied the ring buffer *and* the putter queue! + + // Don't block if we hit the target or if the queue is closed. Return how + // many elements we could get immediately, unless the queue was closed and + // empty, in which case report `error.Closed`. 
+ if (n == 0 and q.closed) return error.Closed; + if (n >= target or q.closed) return n; - var pending: Get = .{ .remaining = remaining, .condition = .{}, .node = .{} }; + var pending: Get = .{ + .remaining = buffer[n..], + .needed = target - n, + .condition = .init, + .node = .{}, + }; q.getters.append(&pending.node); - defer if (pending.remaining.len > 0) q.getters.remove(&pending.node); - while (pending.remaining.len > 0) if (uncancelable) - pending.condition.waitUncancelable(io, &q.mutex) - else - try pending.condition.wait(io, &q.mutex); - q.fillRingBufferFromPutters(io); - return buffer.len; + defer if (pending.needed > 0) q.getters.remove(&pending.node); + + while (pending.needed > 0 and !q.closed) { + if (uncancelable) { + pending.condition.waitUncancelable(io, &q.mutex); + continue; + } + pending.condition.wait(io, &q.mutex) catch |err| switch (err) { + error.Canceled => if (pending.remaining.len == buffer.len) { + // Canceled while waiting, and received no elements. + return error.Canceled; + } else { + // Canceled while waiting, but received some elements, so report those first. + io.recancel(); + return buffer.len - pending.remaining.len; + }, + }; + } + if (pending.remaining.len == buffer.len) { + // The queue was closed while we were waiting. We received no elements. + assert(q.closed); + return error.Closed; + } + return buffer.len - pending.remaining.len; } /// Called when there is nonzero space available in the ring buffer and @@ -1464,7 +1864,8 @@ pub const TypeErasedQueue = struct { @memcpy(slice[0..copy_len], putter.remaining[0..copy_len]); q.len += copy_len; putter.remaining = putter.remaining[copy_len..]; - if (putter.remaining.len == 0) { + putter.needed -|= copy_len; + if (putter.needed == 0) { putter.condition.signal(io); break; } @@ -1487,59 +1888,112 @@ pub fn Queue(Elem: type) type { return .{ .type_erased = .init(@ptrCast(buffer)) }; } - /// Appends elements to the end of the queue. The function returns when - /// at least `min` elements have been added to the buffer or sent - /// directly to a consumer. + pub fn close(q: *@This(), io: Io) void { + q.type_erased.close(io); + } + + /// Appends elements to the end of the queue, potentially blocking if + /// there is insufficient capacity. Returns when any one of the + /// following conditions is satisfied: /// - /// Returns how many elements have been added to the queue. + /// * At least `target` elements have been added to the queue + /// * The queue is closed + /// * The current task is canceled /// - /// Asserts that `elements.len >= min`. - pub fn put(q: *@This(), io: Io, elements: []const Elem, min: usize) Cancelable!usize { - return @divExact(try q.type_erased.put(io, @ptrCast(elements), min * @sizeOf(Elem)), @sizeOf(Elem)); + /// Returns how many of `elements` have been added to the queue, if any. + /// If an error is returned, no elements have been added. + /// + /// If the queue is closed or the task is canceled, but some items were + /// already added before the closure or cancelation, then `put` may + /// return a number lower than `target`, in which case future calls are + /// guaranteed to return `error.Canceled` or `error.Closed`. + /// + /// A return value of 0 is only possible if `target` is 0, in which case + /// the call is guaranteed to queue as many of `elements` as is possible + /// *without* blocking. + /// + /// Asserts that `elements.len >= target`. 
+ pub fn put(q: *@This(), io: Io, elements: []const Elem, target: usize) (QueueClosedError || Cancelable)!usize { + return @divExact(try q.type_erased.put(io, @ptrCast(elements), target * @sizeOf(Elem)), @sizeOf(Elem)); } /// Same as `put` but blocks until all elements have been added to the queue. - pub fn putAll(q: *@This(), io: Io, elements: []const Elem) Cancelable!void { - assert(try q.put(io, elements, elements.len) == elements.len); + /// + /// If the queue is closed or canceled, `error.Closed` or `error.Canceled` + /// is returned, and it is unspecified how many, if any, of `elements` were + /// added to the queue prior to cancelation or closure. + pub fn putAll(q: *@This(), io: Io, elements: []const Elem) (QueueClosedError || Cancelable)!void { + const n = try q.put(io, elements, elements.len); + if (n != elements.len) { + _ = try q.put(io, elements[n..], elements.len - n); + unreachable; // partial `put` implies queue was closed or we were canceled + } } - /// Same as `put` but cannot be interrupted. - pub fn putUncancelable(q: *@This(), io: Io, elements: []const Elem, min: usize) usize { - return @divExact(q.type_erased.putUncancelable(io, @ptrCast(elements), min * @sizeOf(Elem)), @sizeOf(Elem)); + /// Same as `put`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn putUncancelable(q: *@This(), io: Io, elements: []const Elem, min: usize) QueueClosedError!usize { + return @divExact(try q.type_erased.putUncancelable(io, @ptrCast(elements), min * @sizeOf(Elem)), @sizeOf(Elem)); } - pub fn putOne(q: *@This(), io: Io, item: Elem) Cancelable!void { + /// Appends `item` to the end of the queue, blocking if the queue is full. + pub fn putOne(q: *@This(), io: Io, item: Elem) (QueueClosedError || Cancelable)!void { assert(try q.put(io, &.{item}, 1) == 1); } - pub fn putOneUncancelable(q: *@This(), io: Io, item: Elem) void { - assert(q.putUncancelable(io, &.{item}, 1) == 1); + /// Same as `putOne`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn putOneUncancelable(q: *@This(), io: Io, item: Elem) QueueClosedError!void { + assert(try q.putUncancelable(io, &.{item}, 1) == 1); } - /// Receives elements from the beginning of the queue. The function - /// returns when at least `min` elements have been populated inside - /// `buffer`. + /// Receives elements from the beginning of the queue, potentially blocking + /// if there are insufficient elements currently in the queue. Returns when + /// any one of the following conditions is satisfied: + /// + /// * At least `target` elements have been received from the queue + /// * The queue is closed and contains no buffered elements + /// * The current task is canceled + /// + /// Returns how many elements of `buffer` have been populated, if any. + /// If an error is returned, no elements have been populated. + /// + /// If the queue is closed or the task is canceled, but some items were + /// already received before the closure or cancelation, then `get` may + /// return a number lower than `target`, in which case future calls are + /// guaranteed to return `error.Canceled` or `error.Closed`. /// - /// Returns how many elements of `buffer` have been populated. + /// A return value of 0 is only possible if `target` is 0, in which case + /// the call is guaranteed to fill as much of `buffer` as is possible + /// *without* blocking. 
/// - /// Asserts that `buffer.len >= min`. - pub fn get(q: *@This(), io: Io, buffer: []Elem, min: usize) Cancelable!usize { - return @divExact(try q.type_erased.get(io, @ptrCast(buffer), min * @sizeOf(Elem)), @sizeOf(Elem)); + /// Asserts that `buffer.len >= target`. + pub fn get(q: *@This(), io: Io, buffer: []Elem, target: usize) (QueueClosedError || Cancelable)!usize { + return @divExact(try q.type_erased.get(io, @ptrCast(buffer), target * @sizeOf(Elem)), @sizeOf(Elem)); } - pub fn getUncancelable(q: *@This(), io: Io, buffer: []Elem, min: usize) usize { - return @divExact(q.type_erased.getUncancelable(io, @ptrCast(buffer), min * @sizeOf(Elem)), @sizeOf(Elem)); + /// Same as `get`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn getUncancelable(q: *@This(), io: Io, buffer: []Elem, min: usize) QueueClosedError!usize { + return @divExact(try q.type_erased.getUncancelable(io, @ptrCast(buffer), min * @sizeOf(Elem)), @sizeOf(Elem)); } - pub fn getOne(q: *@This(), io: Io) Cancelable!Elem { + /// Receives one element from the beginning of the queue, blocking if the queue is empty. + pub fn getOne(q: *@This(), io: Io) (QueueClosedError || Cancelable)!Elem { var buf: [1]Elem = undefined; assert(try q.get(io, &buf, 1) == 1); return buf[0]; } - pub fn getOneUncancelable(q: *@This(), io: Io) Elem { + /// Same as `getOne`, except does not introduce a cancelation point. + /// + /// For a description of cancelation and cancelation points, see `Future.cancel`. + pub fn getOneUncancelable(q: *@This(), io: Io) QueueClosedError!Elem { var buf: [1]Elem = undefined; - assert(q.getUncancelable(io, &buf, 1) == 1); + assert(try q.getUncancelable(io, &buf, 1) == 1); return buf[0]; } @@ -1627,10 +2081,6 @@ pub fn concurrent( return future; } -pub fn cancelRequested(io: Io) bool { - return io.vtable.cancelRequested(io.userdata); -} - pub const SleepError = error{UnsupportedClock} || UnexpectedError || Cancelable; pub fn sleep(io: Io, duration: Duration, clock: Clock) SleepError!void { diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig index 5da55a7178..1aeb6575f2 100644 --- a/lib/std/Io/Reader.zig +++ b/lib/std/Io/Reader.zig @@ -1252,7 +1252,7 @@ pub const TakeEnumError = Error || error{InvalidEnumTag}; pub fn takeEnum(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum { const Tag = @typeInfo(Enum).@"enum".tag_type; const int = try r.takeInt(Tag, endian); - return std.meta.intToEnum(Enum, int); + return std.enums.fromInt(Enum, int) orelse return error.InvalidEnumTag; } /// Reads an integer with the same size as the given nonexhaustive enum's tag type. diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 0e17cc01d0..37a6a7b656 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -86,7 +86,9 @@ const Thread = struct { /// The value that needs to be passed to pthread_kill or tgkill in order to /// send a signal. signal_id: SignaleeId, - current_closure: ?*Closure = null, + current_closure: ?*Closure, + /// Only populated if `current_closure != null`. Indicates the current cancel protection mode. 
+ cancel_protection: Io.CancelProtection, const SignaleeId = if (std.Thread.use_pthreads) std.c.pthread_t else std.Thread.Id; @@ -98,6 +100,12 @@ const Thread = struct { fn checkCancel(thread: *Thread) error{Canceled}!void { const closure = thread.current_closure orelse return; + + switch (thread.cancel_protection) { + .unblocked => {}, + .blocked => return, + } + switch (@cmpxchgStrong( CancelStatus, &closure.cancel_status, @@ -115,6 +123,11 @@ const Thread = struct { fn beginSyscall(thread: *Thread) error{Canceled}!void { const closure = thread.current_closure orelse return; + switch (thread.cancel_protection) { + .unblocked => {}, + .blocked => return, + } + switch (@cmpxchgStrong( CancelStatus, &closure.cancel_status, @@ -135,6 +148,12 @@ const Thread = struct { fn endSyscall(thread: *Thread) void { const closure = thread.current_closure orelse return; + + switch (thread.cancel_protection) { + .unblocked => {}, + .blocked => return, + } + _ = @cmpxchgStrong( CancelStatus, &closure.cancel_status, @@ -155,6 +174,220 @@ const Thread = struct { fn currentSignalId() SignaleeId { return if (std.Thread.use_pthreads) std.c.pthread_self() else std.Thread.getCurrentId(); } + + fn futexWaitUncancelable(ptr: *const u32, expect: u32) void { + return Thread.futexWaitTimed(null, ptr, expect, null) catch unreachable; + } + + fn futexWait(thread: *Thread, ptr: *const u32, expect: u32) Io.Cancelable!void { + return Thread.futexWaitTimed(thread, ptr, expect, null) catch |err| switch (err) { + error.Canceled => return error.Canceled, + error.Timeout => unreachable, + }; + } + + fn futexWaitTimed(thread: ?*Thread, ptr: *const u32, expect: u32, timeout_ns: ?u64) Io.Cancelable!void { + @branchHint(.cold); + + if (builtin.single_threaded) unreachable; // nobody would ever wake us + + if (builtin.cpu.arch.isWasm()) { + comptime assert(builtin.cpu.has(.wasm, .atomics)); + if (thread) |t| try t.checkCancel(); + const to: i64 = if (timeout_ns) |ns| ns else -1; + const signed_expect: i32 = @bitCast(expect); + const result = asm volatile ( + \\local.get %[ptr] + \\local.get %[expected] + \\local.get %[timeout] + \\memory.atomic.wait32 0 + \\local.set %[ret] + : [ret] "=r" (-> u32), + : [ptr] "r" (ptr), + [expected] "r" (signed_expect), + [timeout] "r" (to), + ); + switch (result) { + 0 => {}, // ok + 1 => {}, // expected != loaded + 2 => {}, // timeout + else => assert(!is_debug), + } + } else switch (native_os) { + .linux => { + const linux = std.os.linux; + var ts_buffer: linux.timespec = undefined; + const ts: ?*linux.timespec = if (timeout_ns) |ns| ts: { + ts_buffer = timestampToPosix(ns); + break :ts &ts_buffer; + } else null; + if (thread) |t| try t.beginSyscall(); + const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, ts); + if (thread) |t| t.endSyscall(); + switch (linux.errno(rc)) { + .SUCCESS => {}, // notified by `wake()` + .INTR => {}, // caller's responsibility to retry + .AGAIN => {}, // ptr.* != expect + .INVAL => {}, // possibly timeout overflow + .TIMEDOUT => {}, // timeout + .FAULT => recoverableOsBugDetected(), // ptr was invalid + else => recoverableOsBugDetected(), + } + }, + .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { + const c = std.c; + const flags: c.UL = .{ + .op = .COMPARE_AND_WAIT, + .NO_ERRNO = true, + }; + if (thread) |t| try t.beginSyscall(); + const status = switch (darwin_supports_ulock_wait2) { + true => c.__ulock_wait2(flags, ptr, expect, ns: { + const ns = timeout_ns orelse break :ns 0; + if (ns == 0) break :ns 1; + break :ns 
ns; + }, 0), + false => c.__ulock_wait(flags, ptr, expect, us: { + const ns = timeout_ns orelse break :us 0; + const us = std.math.lossyCast(u32, ns / std.time.ns_per_us); + if (us == 0) break :us 1; + break :us us; + }), + }; + if (thread) |t| t.endSyscall(); + if (status >= 0) return; + switch (@as(c.E, @enumFromInt(-status))) { + .INTR => {}, // spurious wake + // Address of the futex was paged out. This is unlikely, but possible in theory, and + // pthread/libdispatch on darwin bother to handle it. In this case we'll return + // without waiting, but the caller should retry anyway. + .FAULT => {}, + .TIMEDOUT => {}, // timeout + else => recoverableOsBugDetected(), + } + }, + .windows => { + var timeout_value: windows.LARGE_INTEGER = undefined; + var timeout_ptr: ?*const windows.LARGE_INTEGER = null; + // NTDLL functions work with time in units of 100 nanoseconds. + // Positive values are absolute deadlines while negative values are relative durations. + if (timeout_ns) |delay| { + timeout_value = @as(windows.LARGE_INTEGER, @intCast(delay / 100)); + timeout_value = -timeout_value; + timeout_ptr = &timeout_value; + } + if (thread) |t| try t.checkCancel(); + switch (windows.ntdll.RtlWaitOnAddress(ptr, &expect, @sizeOf(@TypeOf(expect)), timeout_ptr)) { + .SUCCESS => {}, + .CANCELLED => {}, + .TIMEOUT => {}, // timeout + else => recoverableOsBugDetected(), + } + }, + .freebsd => { + const flags = @intFromEnum(std.c.UMTX_OP.WAIT_UINT_PRIVATE); + var tm_size: usize = 0; + var tm: std.c._umtx_time = undefined; + var tm_ptr: ?*const std.c._umtx_time = null; + if (timeout_ns) |ns| { + tm_ptr = &tm; + tm_size = @sizeOf(@TypeOf(tm)); + tm.flags = 0; // use relative time not UMTX_ABSTIME + tm.clockid = .MONOTONIC; + tm.timeout = timestampToPosix(ns); + } + if (thread) |t| try t.beginSyscall(); + const rc = std.c._umtx_op(@intFromPtr(ptr), flags, @as(c_ulong, expect), tm_size, @intFromPtr(tm_ptr)); + if (thread) |t| t.endSyscall(); + if (is_debug) switch (posix.errno(rc)) { + .SUCCESS => {}, + .FAULT => unreachable, // one of the args points to invalid memory + .INVAL => unreachable, // arguments should be correct + .TIMEDOUT => {}, // timeout + .INTR => {}, // spurious wake + else => unreachable, + }; + }, + else => @compileError("unimplemented: futexWait"), + } + } + + fn futexWake(ptr: *const u32, max_waiters: u32) void { + @branchHint(.cold); + + if (builtin.single_threaded) return; // nothing to wake up + + if (builtin.cpu.arch.isWasm()) { + comptime assert(builtin.cpu.has(.wasm, .atomics)); + assert(max_waiters != 0); + const woken_count = asm volatile ( + \\local.get %[ptr] + \\local.get %[waiters] + \\memory.atomic.notify 0 + \\local.set %[ret] + : [ret] "=r" (-> u32), + : [ptr] "r" (ptr), + [waiters] "r" (max_waiters), + ); + _ = woken_count; // can be 0 when linker flag 'shared-memory' is not enabled + } else switch (native_os) { + .linux => { + const linux = std.os.linux; + switch (linux.errno(linux.futex_3arg( + ptr, + .{ .cmd = .WAKE, .private = true }, + @min(max_waiters, std.math.maxInt(i32)), + ))) { + .SUCCESS => return, // successful wake up + .INVAL => return, // invalid futex_wait() on ptr done elsewhere + .FAULT => return, // pointer became invalid while doing the wake + else => return recoverableOsBugDetected(), // deadlock due to operating system bug + } + }, + .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { + const c = std.c; + const flags: c.UL = .{ + .op = .COMPARE_AND_WAIT, + .NO_ERRNO = true, + .WAKE_ALL = max_waiters > 1, + }; + while (true) { + 
const status = c.__ulock_wake(flags, ptr, 0); + if (status >= 0) return; + switch (@as(c.E, @enumFromInt(-status))) { + .INTR, .CANCELED => continue, // spurious wake() + .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t + .NOENT => return, // nothing was woken up + .ALREADY => unreachable, // only for UL.Op.WAKE_THREAD + else => unreachable, // deadlock due to operating system bug + } + } + }, + .windows => { + assert(max_waiters != 0); + switch (max_waiters) { + 1 => windows.ntdll.RtlWakeAddressSingle(ptr), + else => windows.ntdll.RtlWakeAddressAll(ptr), + } + }, + .freebsd => { + const rc = std.c._umtx_op( + @intFromPtr(ptr), + @intFromEnum(std.c.UMTX_OP.WAKE_PRIVATE), + @as(c_ulong, max_waiters), + 0, // there is no timeout struct + 0, // there is no timeout struct pointer + ); + switch (posix.errno(rc)) { + .SUCCESS => {}, + .FAULT => {}, // it's ok if the ptr doesn't point to valid memory + .INVAL => unreachable, // arguments should be correct + else => unreachable, // deadlock due to operating system bug + } + }, + else => @compileError("unimplemented: futexWake"), + } + } }; const max_iovecs_len = 8; @@ -298,6 +531,8 @@ pub fn init( .have_signal_handler = false, .main_thread = .{ .signal_id = Thread.currentSignalId(), + .current_closure = null, + .cancel_protection = undefined, }, }; @@ -332,7 +567,11 @@ pub const init_single_threaded: Threaded = .{ .old_sig_io = undefined, .old_sig_pipe = undefined, .have_signal_handler = false, - .main_thread = .{ .signal_id = undefined }, + .main_thread = .{ + .signal_id = undefined, + .current_closure = null, + .cancel_protection = undefined, + }, }; pub fn setAsyncLimit(t: *Threaded, new_limit: Io.Limit) void { @@ -367,6 +606,8 @@ fn join(t: *Threaded) void { fn worker(t: *Threaded) void { var thread: Thread = .{ .signal_id = Thread.currentSignalId(), + .current_closure = null, + .cancel_protection = undefined, }; Thread.current = &thread; @@ -403,13 +644,13 @@ pub fn io(t: *Threaded) Io { .groupWait = groupWait, .groupCancel = groupCancel, - .mutexLock = mutexLock, - .mutexLockUncancelable = mutexLockUncancelable, - .mutexUnlock = mutexUnlock, + .recancel = recancel, + .swapCancelProtection = swapCancelProtection, + .checkCancel = checkCancel, - .conditionWait = conditionWait, - .conditionWaitUncancelable = conditionWaitUncancelable, - .conditionWake = conditionWake, + .futexWait = futexWait, + .futexWaitUncancelable = futexWaitUncancelable, + .futexWake = futexWake, .dirMake = dirMake, .dirMakePath = dirMakePath, @@ -499,13 +740,13 @@ pub fn ioBasic(t: *Threaded) Io { .groupWait = groupWait, .groupCancel = groupCancel, - .mutexLock = mutexLock, - .mutexLockUncancelable = mutexLockUncancelable, - .mutexUnlock = mutexUnlock, + .recancel = recancel, + .swapCancelProtection = swapCancelProtection, + .checkCancel = checkCancel, - .conditionWait = conditionWait, - .conditionWaitUncancelable = conditionWaitUncancelable, - .conditionWake = conditionWake, + .futexWait = futexWait, + .futexWaitUncancelable = futexWaitUncancelable, + .futexWake = futexWake, .dirMake = dirMake, .dirMakePath = dirMakePath, @@ -577,26 +818,31 @@ const preadv_sym = if (posix.lfs64_abi) posix.system.preadv64 else posix.system. 
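// --- Illustrative aside, not part of the diff above ---
// The new `Thread.futexWait`/`futexWaitTimed`/`futexWake` helpers implement the usual futex
// protocol for each OS: sleep only while the watched u32 still holds the expected value, and
// re-check in a loop after every (possibly spurious) wakeup. A minimal sketch of that
// caller-side protocol, using the public `std.Thread.Futex` API as a stand-in for the private
// helpers shown above:
const std = @import("std");

/// A one-shot flag: `wait` blocks until some thread calls `set`.
const Flag = struct {
    state: std.atomic.Value(u32) = .init(0),

    fn wait(flag: *Flag) void {
        // Sleep only while the value is still 0; the loop absorbs spurious wakeups.
        while (flag.state.load(.acquire) == 0) {
            std.Thread.Futex.wait(&flag.state, 0);
        }
    }

    fn set(flag: *Flag) void {
        flag.state.store(1, .release);
        // Wake every thread currently blocked in `wait`.
        std.Thread.Futex.wake(&flag.state, std.math.maxInt(u32));
    }
};
// --- End of aside ---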
const AsyncClosure = struct { closure: Closure, func: *const fn (context: *anyopaque, result: *anyopaque) void, - reset_event: ResetEvent, - select_condition: ?*ResetEvent, + event: Io.Event, + select_condition: ?*Io.Event, context_alignment: Alignment, result_offset: usize, alloc_len: usize, - const done_reset_event: *ResetEvent = @ptrFromInt(@alignOf(ResetEvent)); + const done_event: *Io.Event = @ptrFromInt(@alignOf(Io.Event)); fn start(closure: *Closure, t: *Threaded) void { const ac: *AsyncClosure = @alignCast(@fieldParentPtr("closure", closure)); const current_thread = Thread.getCurrent(t); + current_thread.current_closure = closure; + current_thread.cancel_protection = .unblocked; + ac.func(ac.contextPointer(), ac.resultPointer()); + current_thread.current_closure = null; + current_thread.cancel_protection = undefined; - if (@atomicRmw(?*ResetEvent, &ac.select_condition, .Xchg, done_reset_event, .release)) |select_reset| { - assert(select_reset != done_reset_event); - select_reset.set(); + if (@atomicRmw(?*Io.Event, &ac.select_condition, .Xchg, done_event, .release)) |select_event| { + assert(select_event != done_event); + select_event.set(ioBasic(t)); } - ac.reset_event.set(); + ac.event.set(ioBasic(t)); } fn resultPointer(ac: *AsyncClosure) [*]u8 { @@ -638,7 +884,7 @@ const AsyncClosure = struct { .context_alignment = context_alignment, .result_offset = actual_result_offset, .alloc_len = alloc_len, - .reset_event = .unset, + .event = .unset, .select_condition = null, }; @memcpy(ac.contextPointer()[0..context.len], context); @@ -646,10 +892,10 @@ const AsyncClosure = struct { } fn waitAndDeinit(ac: *AsyncClosure, t: *Threaded, result: []u8) void { - ac.reset_event.wait(t) catch |err| switch (err) { + ac.event.wait(ioBasic(t)) catch |err| switch (err) { error.Canceled => { ac.closure.requestCancel(t); - ac.reset_event.waitUncancelable(); + ac.event.waitUncancelable(ioBasic(t)); }, }; @memcpy(result, ac.resultPointer()[0..result.len]); @@ -771,14 +1017,19 @@ const GroupClosure = struct { const current_thread = Thread.getCurrent(t); const group = gc.group; const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state); - const reset_event: *ResetEvent = @ptrCast(&group.context); + const event: *Io.Event = @ptrCast(&group.context); + current_thread.current_closure = closure; + current_thread.cancel_protection = .unblocked; + gc.func(group, gc.contextPointer()); + current_thread.current_closure = null; + current_thread.cancel_protection = undefined; const prev_state = group_state.fetchSub(sync_one_pending, .acq_rel); assert((prev_state / sync_one_pending) > 0); - if (prev_state == (sync_one_pending | sync_is_waiting)) reset_event.set(); + if (prev_state == (sync_one_pending | sync_is_waiting)) event.set(ioBasic(t)); } fn contextPointer(gc: *GroupClosure) [*]u8 { @@ -939,10 +1190,10 @@ fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void { if (builtin.single_threaded) return; const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state); - const reset_event: *ResetEvent = @ptrCast(&group.context); + const event: *Io.Event = @ptrCast(&group.context); const prev_state = group_state.fetchAdd(GroupClosure.sync_is_waiting, .acquire); assert(prev_state & GroupClosure.sync_is_waiting == 0); - if ((prev_state / GroupClosure.sync_one_pending) > 0) reset_event.wait(t) catch |err| switch (err) { + if ((prev_state / GroupClosure.sync_one_pending) > 0) event.wait(ioBasic(t)) catch |err| switch (err) { error.Canceled => { var node: *std.SinglyLinkedList.Node = 
@ptrCast(@alignCast(token)); while (true) { @@ -950,7 +1201,7 @@ fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void { gc.closure.requestCancel(t); node = node.next orelse break; } - reset_event.waitUncancelable(); + event.waitUncancelable(ioBasic(t)); }, }; @@ -979,10 +1230,10 @@ fn groupCancel(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void } const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state); - const reset_event: *ResetEvent = @ptrCast(&group.context); + const event: *Io.Event = @ptrCast(&group.context); const prev_state = group_state.fetchAdd(GroupClosure.sync_is_waiting, .acquire); assert(prev_state & GroupClosure.sync_is_waiting == 0); - if ((prev_state / GroupClosure.sync_one_pending) > 0) reset_event.waitUncancelable(); + if ((prev_state / GroupClosure.sync_one_pending) > 0) event.waitUncancelable(ioBasic(t)); { var node: *std.SinglyLinkedList.Node = @ptrCast(@alignCast(token)); @@ -995,6 +1246,32 @@ fn groupCancel(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void } } +fn recancel(userdata: ?*anyopaque) void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + const current_thread: *Thread = .getCurrent(t); + const cancel_status = &current_thread.current_closure.?.cancel_status; + switch (@atomicLoad(CancelStatus, cancel_status, .monotonic)) { + .none => unreachable, // called `recancel` when not canceled + .requested => unreachable, // called `recancel` when cancelation was already outstanding + .acknowledged => {}, + _ => unreachable, // invalid state: not in a syscall + } + @atomicStore(CancelStatus, cancel_status, .requested, .monotonic); +} + +fn swapCancelProtection(userdata: ?*anyopaque, new: Io.CancelProtection) Io.CancelProtection { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + const current_thread: *Thread = .getCurrent(t); + const old = current_thread.cancel_protection; + current_thread.cancel_protection = new; + return old; +} + +fn checkCancel(userdata: ?*anyopaque) Io.Cancelable!void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + return Thread.getCurrent(t).checkCancel(); +} + fn await( userdata: ?*anyopaque, any_future: *Io.AnyFuture, @@ -1020,181 +1297,35 @@ fn cancel( ac.waitAndDeinit(t, result); } -fn mutexLock(userdata: ?*anyopaque, prev_state: Io.Mutex.State, mutex: *Io.Mutex) Io.Cancelable!void { - if (builtin.single_threaded) unreachable; // Interface should have prevented this. - if (native_os == .netbsd) @panic("TODO"); +fn futexWait(userdata: ?*anyopaque, ptr: *const u32, expected: u32, timeout: Io.Timeout) Io.Cancelable!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); - if (prev_state == .contended) { - try futexWait(current_thread, @ptrCast(&mutex.state), @intFromEnum(Io.Mutex.State.contended)); - } - while (@atomicRmw(Io.Mutex.State, &mutex.state, .Xchg, .contended, .acquire) != .unlocked) { - try futexWait(current_thread, @ptrCast(&mutex.state), @intFromEnum(Io.Mutex.State.contended)); - } -} - -fn mutexLockUncancelable(userdata: ?*anyopaque, prev_state: Io.Mutex.State, mutex: *Io.Mutex) void { - if (builtin.single_threaded) unreachable; // Interface should have prevented this.
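// --- Illustrative aside, not part of the diff above ---
// `swapCancelProtection` lets a task mask cancelation around work that must run to completion
// and then restore the previous mode; while protection is `.blocked`, `checkCancel` and
// `beginSyscall` above become no-ops. Whether `Io` exposes a convenience wrapper for this is
// not shown in this diff, so the direct vtable call below is an assumption about usage, and
// `runWithoutCancelation` is a hypothetical name.
const std = @import("std");

fn runWithoutCancelation(io: std.Io) void {
    // Block cancelation, remembering whatever mode was in effect before.
    const prev = io.vtable.swapCancelProtection(io.userdata, .blocked);
    // Restore the previous mode once the critical region is done.
    defer _ = io.vtable.swapCancelProtection(io.userdata, prev);

    // ... work that must not observe error.Canceled goes here ...
}
// --- End of aside ---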
- if (native_os == .netbsd) @panic("TODO"); - _ = userdata; - if (prev_state == .contended) { - futexWaitUncancelable(@ptrCast(&mutex.state), @intFromEnum(Io.Mutex.State.contended)); - } - while (@atomicRmw(Io.Mutex.State, &mutex.state, .Xchg, .contended, .acquire) != .unlocked) { - futexWaitUncancelable(@ptrCast(&mutex.state), @intFromEnum(Io.Mutex.State.contended)); - } -} - -fn mutexUnlock(userdata: ?*anyopaque, prev_state: Io.Mutex.State, mutex: *Io.Mutex) void { - if (builtin.single_threaded) unreachable; // Interface should have prevented this. - if (native_os == .netbsd) @panic("TODO"); - _ = userdata; - _ = prev_state; - if (@atomicRmw(Io.Mutex.State, &mutex.state, .Xchg, .unlocked, .release) == .contended) { - futexWake(@ptrCast(&mutex.state), 1); - } -} - -fn conditionWaitUncancelable(userdata: ?*anyopaque, cond: *Io.Condition, mutex: *Io.Mutex) void { - if (builtin.single_threaded) unreachable; // Deadlock. - if (native_os == .netbsd) @panic("TODO"); - const t: *Threaded = @ptrCast(@alignCast(userdata)); const t_io = ioBasic(t); - comptime assert(@TypeOf(cond.state) == u64); - const ints: *[2]std.atomic.Value(u32) = @ptrCast(&cond.state); - const cond_state = &ints[0]; - const cond_epoch = &ints[1]; - const one_waiter = 1; - const waiter_mask = 0xffff; - const one_signal = 1 << 16; - const signal_mask = 0xffff << 16; - var epoch = cond_epoch.load(.acquire); - var state = cond_state.fetchAdd(one_waiter, .monotonic); - assert(state & waiter_mask != waiter_mask); - state += one_waiter; - - mutex.unlock(t_io); - defer mutex.lockUncancelable(t_io); - - while (true) { - futexWaitUncancelable(cond_epoch, epoch); - epoch = cond_epoch.load(.acquire); - state = cond_state.load(.monotonic); - while (state & signal_mask != 0) { - const new_state = state - one_waiter - one_signal; - state = cond_state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return; - } + const timeout_ns: ?u64 = ns: { + const d = (timeout.toDurationFromNow(t_io) catch break :ns 10) orelse break :ns null; + break :ns std.math.lossyCast(u64, d.raw.toNanoseconds()); + }; + switch (native_os) { + .illumos, .netbsd, .openbsd => @panic("TODO"), + else => try current_thread.futexWaitTimed(ptr, expected, timeout_ns), } } -fn conditionWait(userdata: ?*anyopaque, cond: *Io.Condition, mutex: *Io.Mutex) Io.Cancelable!void { - if (builtin.single_threaded) unreachable; // Deadlock. - if (native_os == .netbsd) @panic("TODO"); +fn futexWaitUncancelable(userdata: ?*anyopaque, ptr: *const u32, expected: u32) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - const current_thread = Thread.getCurrent(t); - const t_io = ioBasic(t); - comptime assert(@TypeOf(cond.state) == u64); - const ints: *[2]std.atomic.Value(u32) = @ptrCast(&cond.state); - const cond_state = &ints[0]; - const cond_epoch = &ints[1]; - const one_waiter = 1; - const waiter_mask = 0xffff; - const one_signal = 1 << 16; - const signal_mask = 0xffff << 16; - // Observe the epoch, then check the state again to see if we should wake up. - // The epoch must be observed before we check the state or we could potentially miss a wake() and deadlock: - // - // - T1: s = LOAD(&state) - // - T2: UPDATE(&s, signal) - // - T2: UPDATE(&epoch, 1) + FUTEX_WAKE(&epoch) - // - T1: e = LOAD(&epoch) (was reordered after the state load) - // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change) - // - // Acquire barrier to ensure the epoch load happens before the state load. 
- var epoch = cond_epoch.load(.acquire); - var state = cond_state.fetchAdd(one_waiter, .monotonic); - assert(state & waiter_mask != waiter_mask); - state += one_waiter; - - mutex.unlock(t_io); - defer mutex.lockUncancelable(t_io); - - while (true) { - try futexWait(current_thread, cond_epoch, epoch); - - epoch = cond_epoch.load(.acquire); - state = cond_state.load(.monotonic); - - // Try to wake up by consuming a signal and decremented the waiter we - // added previously. Acquire barrier ensures code before the wake() - // which added the signal happens before we decrement it and return. - while (state & signal_mask != 0) { - const new_state = state - one_waiter - one_signal; - state = cond_state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return; - } + _ = t; + switch (native_os) { + .illumos, .netbsd, .openbsd => @panic("TODO"), + else => Thread.futexWaitUncancelable(ptr, expected), } } -fn conditionWake(userdata: ?*anyopaque, cond: *Io.Condition, wake: Io.Condition.Wake) void { - if (builtin.single_threaded) unreachable; // Nothing to wake up. +fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); _ = t; - comptime assert(@TypeOf(cond.state) == u64); - const ints: *[2]std.atomic.Value(u32) = @ptrCast(&cond.state); - const cond_state = &ints[0]; - const cond_epoch = &ints[1]; - const one_waiter = 1; - const waiter_mask = 0xffff; - const one_signal = 1 << 16; - const signal_mask = 0xffff << 16; - var state = cond_state.load(.monotonic); - while (true) { - const waiters = (state & waiter_mask) / one_waiter; - const signals = (state & signal_mask) / one_signal; - - // Reserves which waiters to wake up by incrementing the signals count. - // Therefore, the signals count is always less than or equal to the - // waiters count. We don't need to Futex.wake if there's nothing to - // wake up or if other wake() threads have reserved to wake up the - // current waiters. - const wakeable = waiters - signals; - if (wakeable == 0) { - return; - } - - const to_wake = switch (wake) { - .one => 1, - .all => wakeable, - }; - - // Reserve the amount of waiters to wake by incrementing the signals - // count. Release barrier ensures code before the wake() happens before - // the signal it posted and consumed by the wait() threads. - const new_state = state + (one_signal * to_wake); - state = cond_state.cmpxchgWeak(state, new_state, .release, .monotonic) orelse { - // Wake up the waiting threads we reserved above by changing the epoch value. - // - // A waiting thread could miss a wake up if *exactly* ((1<<32)-1) - // wake()s happen between it observing the epoch and sleeping on - // it. This is very unlikely due to how many precise amount of - // Futex.wake() calls that would be between the waiting thread's - // potential preemption. - // - // Release barrier ensures the signal being added to the state - // happens before the epoch is changed. 
If not, the waiting thread - // could potentially deadlock from missing both the state and epoch - // change: - // - // - T2: UPDATE(&epoch, 1) (reordered before the state change) - // - T1: e = LOAD(&epoch) - // - T1: s = LOAD(&state) - // - T2: UPDATE(&state, signal) + FUTEX_WAKE(&epoch) - // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed both epoch change and state change) - _ = cond_epoch.fetchAdd(1, .release); - if (native_os == .netbsd) @panic("TODO"); - futexWake(cond_epoch, to_wake); - return; - }; + switch (native_os) { + .illumos, .netbsd, .openbsd => @panic("TODO"), + else => Thread.futexWake(ptr, max_waiters), } } @@ -1301,8 +1432,11 @@ fn dirMakeWindows(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode _ = mode; const sub_dir_handle = windows.OpenFile(sub_path_w.span(), .{ .dir = dir.handle, - .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, - .creation = windows.FILE_CREATE, + .access_mask = .{ + .GENERIC = .{ .READ = true }, + .STANDARD = .{ .SYNCHRONIZE = true }, + }, + .creation = .CREATE, .filter = .dir_only, }) catch |err| switch (err) { error.IsDir => return error.Unexpected, @@ -1370,9 +1504,6 @@ fn dirMakeOpenPathWindows( const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); const w = windows; - const access_mask = w.STANDARD_RIGHTS_READ | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA | - w.SYNCHRONIZE | w.FILE_TRAVERSE | - (if (options.iterate) w.FILE_LIST_DIRECTORY else @as(u32, 0)); var it = std.fs.path.componentIterator(sub_path); // If there are no components in the path, then create a dummy component with the full path. @@ -1387,7 +1518,7 @@ fn dirMakeOpenPathWindows( const sub_path_w_array = try w.sliceToPrefixedFileW(dir.handle, component.path); const sub_path_w = sub_path_w_array.span(); const is_last = it.peekNext() == null; - const create_disposition: u32 = if (is_last) w.FILE_OPEN_IF else w.FILE_CREATE; + const create_disposition: w.FILE.CREATE_DISPOSITION = if (is_last) .OPEN_IF else .CREATE; var result: Io.Dir = .{ .handle = undefined }; @@ -1397,26 +1528,40 @@ fn dirMakeOpenPathWindows( .MaximumLength = path_len_bytes, .Buffer = @constCast(sub_path_w.ptr), }; - var attr: w.OBJECT_ATTRIBUTES = .{ - .Length = @sizeOf(w.OBJECT_ATTRIBUTES), - .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
- .ObjectName = &nt_name, - .SecurityDescriptor = null, - .SecurityQualityOfService = null, - }; - const open_reparse_point: w.DWORD = if (!options.follow_symlinks) w.FILE_OPEN_REPARSE_POINT else 0x0; var io_status_block: w.IO_STATUS_BLOCK = undefined; const rc = w.ntdll.NtCreateFile( &result.handle, - access_mask, - &attr, + .{ + .SPECIFIC = .{ .FILE_DIRECTORY = .{ + .LIST = options.iterate, + .READ_EA = true, + .READ_ATTRIBUTES = true, + .TRAVERSE = true, + } }, + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + }, + &.{ + .Length = @sizeOf(w.OBJECT_ATTRIBUTES), + .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle, + .Attributes = .{}, + .ObjectName = &nt_name, + .SecurityDescriptor = null, + .SecurityQualityOfService = null, + }, &io_status_block, null, - w.FILE_ATTRIBUTE_NORMAL, - w.FILE_SHARE_READ | w.FILE_SHARE_WRITE | w.FILE_SHARE_DELETE, + .{ .NORMAL = true }, + .VALID_FLAGS, create_disposition, - w.FILE_DIRECTORY_FILE | w.FILE_SYNCHRONOUS_IO_NONALERT | w.FILE_OPEN_FOR_BACKUP_INTENT | open_reparse_point, + .{ + .DIRECTORY_FILE = true, + .IO = .SYNCHRONOUS_NONALERT, + .OPEN_FOR_BACKUP_INTENT = true, + .OPEN_REPARSE_POINT = !options.follow_symlinks, + }, null, 0, ); @@ -1519,12 +1664,19 @@ fn dirStatPathLinux( dir.handle, sub_path_posix, flags, - linux.STATX_INO | linux.STATX_SIZE | linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME, + .{ .TYPE = true, .MODE = true, .ATIME = true, .MTIME = true, .CTIME = true, .INO = true, .SIZE = true }, &statx, ); switch (linux.errno(rc)) { .SUCCESS => { current_thread.endSyscall(); + assert(statx.mask.TYPE); + assert(statx.mask.MODE); + assert(statx.mask.ATIME); + assert(statx.mask.MTIME); + assert(statx.mask.CTIME); + assert(statx.mask.INO); + assert(statx.mask.SIZE); return statFromLinux(&statx); }, .INTR => { @@ -1711,12 +1863,19 @@ fn fileStatLinux(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File file.handle, "", linux.AT.EMPTY_PATH, - linux.STATX_INO | linux.STATX_SIZE | linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME, + .{ .TYPE = true, .MODE = true, .ATIME = true, .MTIME = true, .CTIME = true, .INO = true, .SIZE = true }, &statx, ); switch (linux.errno(rc)) { .SUCCESS => { current_thread.endSyscall(); + assert(statx.mask.TYPE); + assert(statx.mask.MODE); + assert(statx.mask.ATIME); + assert(statx.mask.MTIME); + assert(statx.mask.CTIME); + assert(statx.mask.INO); + assert(statx.mask.SIZE); return statFromLinux(&statx); }, .INTR => { @@ -1749,8 +1908,8 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi try current_thread.checkCancel(); var io_status_block: windows.IO_STATUS_BLOCK = undefined; - var info: windows.FILE_ALL_INFORMATION = undefined; - const rc = windows.ntdll.NtQueryInformationFile(file.handle, &io_status_block, &info, @sizeOf(windows.FILE_ALL_INFORMATION), .FileAllInformation); + var info: windows.FILE.ALL_INFORMATION = undefined; + const rc = windows.ntdll.NtQueryInformationFile(file.handle, &io_status_block, &info, @sizeOf(windows.FILE.ALL_INFORMATION), .All); switch (rc) { .SUCCESS => {}, // Buffer overflow here indicates that there is more information available than was able to be stored in the buffer @@ -1765,9 +1924,9 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi .inode = info.InternalInformation.IndexNumber, .size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)), .mode = 0, - .kind = 
if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) reparse_point: { - var tag_info: windows.FILE_ATTRIBUTE_TAG_INFO = undefined; - const tag_rc = windows.ntdll.NtQueryInformationFile(file.handle, &io_status_block, &tag_info, @sizeOf(windows.FILE_ATTRIBUTE_TAG_INFO), .FileAttributeTagInformation); + .kind = if (info.BasicInformation.FileAttributes.REPARSE_POINT) reparse_point: { + var tag_info: windows.FILE.ATTRIBUTE_TAG_INFO = undefined; + const tag_rc = windows.ntdll.NtQueryInformationFile(file.handle, &io_status_block, &tag_info, @sizeOf(windows.FILE.ATTRIBUTE_TAG_INFO), .AttributeTag); switch (tag_rc) { .SUCCESS => {}, // INFO_LENGTH_MISMATCH and ACCESS_DENIED are the only documented possible errors @@ -1776,12 +1935,10 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi .ACCESS_DENIED => return error.AccessDenied, else => return windows.unexpectedStatus(rc), } - if (tag_info.ReparseTag & windows.reparse_tag_name_surrogate_bit != 0) { - break :reparse_point .sym_link; - } + if (tag_info.ReparseTag.IsSurrogate) break :reparse_point .sym_link; // Unknown reparse point break :reparse_point .unknown; - } else if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) + } else if (info.BasicInformation.FileAttributes.DIRECTORY) .directory else .file, @@ -1983,15 +2140,15 @@ fn dirAccessWindows( .MaximumLength = path_len_bytes, .Buffer = @constCast(sub_path_w.ptr), }; - var attr = windows.OBJECT_ATTRIBUTES{ + var attr: windows.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. + .Attributes = .{}, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; - var basic_info: windows.FILE_BASIC_INFORMATION = undefined; + var basic_info: windows.FILE.BASIC_INFORMATION = undefined; switch (windows.ntdll.NtQueryAttributesFile(&attr, &basic_info)) { .SUCCESS => return, .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, @@ -2187,16 +2344,21 @@ fn dirCreateFileWindows( const sub_path_w_array = try w.sliceToPrefixedFileW(dir.handle, sub_path); const sub_path_w = sub_path_w_array.span(); - const read_flag = if (flags.read) @as(u32, w.GENERIC_READ) else 0; const handle = try w.OpenFile(sub_path_w, .{ .dir = dir.handle, - .access_mask = w.SYNCHRONIZE | w.GENERIC_WRITE | read_flag, + .access_mask = .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ + .WRITE = true, + .READ = flags.read, + }, + }, .creation = if (flags.exclusive) - @as(u32, w.FILE_CREATE) + .CREATE else if (flags.truncate) - @as(u32, w.FILE_OVERWRITE_IF) + .OVERWRITE_IF else - @as(u32, w.FILE_OPEN_IF), + .OPEN_IF, }); errdefer w.CloseHandle(handle); var io_status_block: w.IO_STATUS_BLOCK = undefined; @@ -2511,18 +2673,12 @@ pub fn dirOpenFileWtf16( var attr: w.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(w.OBJECT_ATTRIBUTES), .RootDirectory = dir_handle, - .Attributes = 0, + .Attributes = .{}, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var io_status_block: w.IO_STATUS_BLOCK = undefined; - const blocking_flag: w.ULONG = w.FILE_SYNCHRONOUS_IO_NONALERT; - const file_or_dir_flag: w.ULONG = w.FILE_NON_DIRECTORY_FILE; - // If we're not following symlinks, we need to ensure we don't pass in any - // synchronization flags such as FILE_SYNCHRONOUS_IO_NONALERT. 
- const create_file_flags: w.ULONG = file_or_dir_flag | - if (flags.follow_symlinks) blocking_flag else w.FILE_OPEN_REPARSE_POINT; // There are multiple kernel bugs being worked around with retries. const max_attempts = 13; @@ -2534,16 +2690,24 @@ pub fn dirOpenFileWtf16( var result: w.HANDLE = undefined; const rc = w.ntdll.NtCreateFile( &result, - w.SYNCHRONIZE | - (if (flags.isRead()) @as(u32, w.GENERIC_READ) else 0) | - (if (flags.isWrite()) @as(u32, w.GENERIC_WRITE) else 0), + .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ + .READ = flags.isRead(), + .WRITE = flags.isWrite(), + }, + }, &attr, &io_status_block, null, - w.FILE_ATTRIBUTE_NORMAL, - w.FILE_SHARE_WRITE | w.FILE_SHARE_READ | w.FILE_SHARE_DELETE, - w.FILE_OPEN, - create_file_flags, + .{ .NORMAL = true }, + .VALID_FLAGS, + .OPEN, + .{ + .IO = if (flags.follow_symlinks) .SYNCHRONOUS_NONALERT else .ASYNCHRONOUS, + .NON_DIRECTORY_FILE = true, + .OPEN_REPARSE_POINT = !flags.follow_symlinks, + }, null, 0, ); @@ -2835,10 +2999,6 @@ pub fn dirOpenDirWindows( ) Io.Dir.OpenError!Io.Dir { const current_thread = Thread.getCurrent(t); const w = windows; - // TODO remove some of these flags if options.access_sub_paths is false - const base_flags = w.STANDARD_RIGHTS_READ | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA | - w.SYNCHRONIZE | w.FILE_TRAVERSE; - const access_mask: u32 = if (options.iterate) base_flags | w.FILE_LIST_DIRECTORY else base_flags; const path_len_bytes: u16 = @intCast(sub_path_w.len * 2); var nt_name: w.UNICODE_STRING = .{ @@ -2846,28 +3006,43 @@ pub fn dirOpenDirWindows( .MaximumLength = path_len_bytes, .Buffer = @constCast(sub_path_w.ptr), }; - var attr: w.OBJECT_ATTRIBUTES = .{ - .Length = @sizeOf(w.OBJECT_ATTRIBUTES), - .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
- .ObjectName = &nt_name, - .SecurityDescriptor = null, - .SecurityQualityOfService = null, - }; - const open_reparse_point: w.DWORD = if (!options.follow_symlinks) w.FILE_OPEN_REPARSE_POINT else 0x0; var io_status_block: w.IO_STATUS_BLOCK = undefined; var result: Io.Dir = .{ .handle = undefined }; try current_thread.checkCancel(); const rc = w.ntdll.NtCreateFile( &result.handle, - access_mask, - &attr, + // TODO remove some of these flags if options.access_sub_paths is false + .{ + .SPECIFIC = .{ .FILE_DIRECTORY = .{ + .LIST = options.iterate, + .READ_EA = true, + .TRAVERSE = true, + .READ_ATTRIBUTES = true, + } }, + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + }, + &.{ + .Length = @sizeOf(w.OBJECT_ATTRIBUTES), + .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle, + .Attributes = .{}, + .ObjectName = &nt_name, + .SecurityDescriptor = null, + .SecurityQualityOfService = null, + }, &io_status_block, null, - w.FILE_ATTRIBUTE_NORMAL, - w.FILE_SHARE_READ | w.FILE_SHARE_WRITE | w.FILE_SHARE_DELETE, - w.FILE_OPEN, - w.FILE_DIRECTORY_FILE | w.FILE_SYNCHRONOUS_IO_NONALERT | w.FILE_OPEN_FOR_BACKUP_INTENT | open_reparse_point, + .{ .NORMAL = true }, + .VALID_FLAGS, + .OPEN, + .{ + .DIRECTORY_FILE = true, + .IO = .SYNCHRONOUS_NONALERT, + .OPEN_FOR_BACKUP_INTENT = true, + .OPEN_REPARSE_POINT = !options.follow_symlinks, + }, null, 0, ); @@ -3580,28 +3755,28 @@ fn sleepPosix(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void { fn select(userdata: ?*anyopaque, futures: []const *Io.AnyFuture) Io.Cancelable!usize { const t: *Threaded = @ptrCast(@alignCast(userdata)); - var reset_event: ResetEvent = .unset; + var event: Io.Event = .unset; for (futures, 0..) |future, i| { const closure: *AsyncClosure = @ptrCast(@alignCast(future)); - if (@atomicRmw(?*ResetEvent, &closure.select_condition, .Xchg, &reset_event, .seq_cst) == AsyncClosure.done_reset_event) { + if (@atomicRmw(?*Io.Event, &closure.select_condition, .Xchg, &event, .seq_cst) == AsyncClosure.done_event) { for (futures[0..i]) |cleanup_future| { const cleanup_closure: *AsyncClosure = @ptrCast(@alignCast(cleanup_future)); - if (@atomicRmw(?*ResetEvent, &cleanup_closure.select_condition, .Xchg, null, .seq_cst) == AsyncClosure.done_reset_event) { - cleanup_closure.reset_event.waitUncancelable(); // Ensure no reference to our stack-allocated reset_event. + if (@atomicRmw(?*Io.Event, &cleanup_closure.select_condition, .Xchg, null, .seq_cst) == AsyncClosure.done_event) { + cleanup_closure.event.waitUncancelable(ioBasic(t)); // Ensure no reference to our stack-allocated event. } } return i; } } - try reset_event.wait(t); + try event.wait(ioBasic(t)); var result: ?usize = null; for (futures, 0..) |future, i| { const closure: *AsyncClosure = @ptrCast(@alignCast(future)); - if (@atomicRmw(?*ResetEvent, &closure.select_condition, .Xchg, null, .seq_cst) == AsyncClosure.done_reset_event) { - closure.reset_event.waitUncancelable(); // Ensure no reference to our stack-allocated reset_event. + if (@atomicRmw(?*Io.Event, &closure.select_condition, .Xchg, null, .seq_cst) == AsyncClosure.done_event) { + closure.event.waitUncancelable(ioBasic(t)); // Ensure no reference to our stack-allocated event. if (result == null) result = i; // In case multiple are ready, return first. 
} } @@ -5620,11 +5795,13 @@ fn netLookup( host_name: HostName, resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, -) void { +) net.HostName.LookupError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - const current_thread = Thread.getCurrent(t); - const t_io = io(t); - resolved.putOneUncancelable(t_io, .{ .end = netLookupFallible(t, current_thread, host_name, resolved, options) }); + defer resolved.close(io(t)); + netLookupFallible(t, host_name, resolved, options) catch |err| switch (err) { + error.Closed => unreachable, // `resolved` must not be closed until `netLookup` returns + else => |e| return e, + }; } fn netLookupUnavailable( @@ -5632,22 +5809,23 @@ fn netLookupUnavailable( host_name: HostName, resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, -) void { +) net.HostName.LookupError!void { _ = host_name; _ = options; const t: *Threaded = @ptrCast(@alignCast(userdata)); - const t_io = ioBasic(t); - resolved.putOneUncancelable(t_io, .{ .end = error.NetworkDown }); + resolved.close(ioBasic(t)); + return error.NetworkDown; } fn netLookupFallible( t: *Threaded, - current_thread: *Thread, host_name: HostName, resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, -) !void { +) (net.HostName.LookupError || Io.QueueClosedError)!void { if (!have_networking) return error.NetworkDown; + + const current_thread: *Thread = .getCurrent(t); const t_io = io(t); const name = host_name.bytes; assert(name.len <= HostName.max_len); @@ -6188,7 +6366,7 @@ fn lookupDnsSearch( host_name: HostName, resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, -) HostName.LookupError!void { +) (HostName.LookupError || Io.QueueClosedError)!void { const t_io = io(t); const rc = HostName.ResolvConf.init(t_io) catch return error.ResolvConfParseFailed; @@ -6232,7 +6410,7 @@ fn lookupDns( rc: *const HostName.ResolvConf, resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, -) HostName.LookupError!void { +) (HostName.LookupError || Io.QueueClosedError)!void { const t_io = io(t); const family_records: [2]struct { af: IpAddress.Family, rr: HostName.DnsRecord } = .{ .{ .af = .ip6, .rr = .A }, @@ -6446,8 +6624,10 @@ fn lookupHosts( return error.DetectingNetworkConfigurationFailed; }, }, - error.Canceled => |e| return e, - error.UnknownHostName => |e| return e, + error.Canceled, + error.Closed, + error.UnknownHostName, + => |e| return e, }; } @@ -6457,7 +6637,7 @@ fn lookupHostsReader( resolved: *Io.Queue(HostName.LookupResult), options: HostName.LookupOptions, reader: *Io.Reader, -) error{ ReadFailed, Canceled, UnknownHostName }!void { +) error{ ReadFailed, Canceled, UnknownHostName, Closed }!void { const t_io = io(t); var addresses_len: usize = 0; var canonical_name: ?HostName = null; @@ -6562,460 +6742,6 @@ fn copyCanon(canonical_name_buffer: *[HostName.max_len]u8, name: []const u8) Hos /// ulock_wait2() uses 64-bit nano-second timeouts (with the same convention) const darwin_supports_ulock_wait2 = builtin.os.version_range.semver.min.major >= 11; -fn futexWait(current_thread: *Thread, ptr: *const std.atomic.Value(u32), expect: u32) Io.Cancelable!void { - @branchHint(.cold); - - if (builtin.cpu.arch.isWasm()) { - comptime assert(builtin.cpu.has(.wasm, .atomics)); - try current_thread.checkCancel(); - const timeout: i64 = -1; - const signed_expect: i32 = @bitCast(expect); - const result = asm volatile ( - \\local.get %[ptr] - \\local.get %[expected] - \\local.get %[timeout] - 
\\memory.atomic.wait32 0 - \\local.set %[ret] - : [ret] "=r" (-> u32), - : [ptr] "r" (&ptr.raw), - [expected] "r" (signed_expect), - [timeout] "r" (timeout), - ); - switch (result) { - 0 => {}, // ok - 1 => {}, // expected != loaded - 2 => assert(!is_debug), // timeout - else => assert(!is_debug), - } - } else switch (native_os) { - .linux => { - const linux = std.os.linux; - try current_thread.beginSyscall(); - const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, null); - current_thread.endSyscall(); - switch (linux.errno(rc)) { - .SUCCESS => {}, // notified by `wake()` - .INTR => {}, // caller's responsibility to retry - .AGAIN => {}, // ptr.* != expect - .INVAL => {}, // possibly timeout overflow - .TIMEDOUT => recoverableOsBugDetected(), - .FAULT => recoverableOsBugDetected(), // ptr was invalid - else => recoverableOsBugDetected(), - } - }, - .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { - const c = std.c; - const flags: c.UL = .{ - .op = .COMPARE_AND_WAIT, - .NO_ERRNO = true, - }; - try current_thread.beginSyscall(); - const status = if (darwin_supports_ulock_wait2) - c.__ulock_wait2(flags, ptr, expect, 0, 0) - else - c.__ulock_wait(flags, ptr, expect, 0); - current_thread.endSyscall(); - - if (status >= 0) return; - - if (is_debug) switch (@as(c.E, @enumFromInt(-status))) { - .INTR => {}, // spurious wake - // Address of the futex was paged out. This is unlikely, but possible in theory, and - // pthread/libdispatch on darwin bother to handle it. In this case we'll return - // without waiting, but the caller should retry anyway. - .FAULT => {}, - .TIMEDOUT => unreachable, - else => unreachable, - }; - }, - .windows => { - try current_thread.checkCancel(); - switch (windows.ntdll.RtlWaitOnAddress(ptr, &expect, @sizeOf(@TypeOf(expect)), null)) { - .SUCCESS => {}, - .CANCELLED => return error.Canceled, - else => recoverableOsBugDetected(), - } - }, - .freebsd => { - const flags = @intFromEnum(std.c.UMTX_OP.WAIT_UINT_PRIVATE); - try current_thread.beginSyscall(); - const rc = std.c._umtx_op(@intFromPtr(&ptr.raw), flags, @as(c_ulong, expect), 0, 0); - current_thread.endSyscall(); - if (is_debug) switch (posix.errno(rc)) { - .SUCCESS => {}, - .FAULT => unreachable, // one of the args points to invalid memory - .INVAL => unreachable, // arguments should be correct - .TIMEDOUT => unreachable, // no timeout provided - .INTR => {}, // spurious wake - else => unreachable, - }; - }, - else => @compileError("unimplemented: futexWait"), - } -} - -pub fn futexWaitUncancelable(ptr: *const std.atomic.Value(u32), expect: u32) void { - @branchHint(.cold); - - if (builtin.cpu.arch.isWasm()) { - comptime assert(builtin.cpu.has(.wasm, .atomics)); - const timeout: i64 = -1; - const signed_expect: i32 = @bitCast(expect); - const result = asm volatile ( - \\local.get %[ptr] - \\local.get %[expected] - \\local.get %[timeout] - \\memory.atomic.wait32 0 - \\local.set %[ret] - : [ret] "=r" (-> u32), - : [ptr] "r" (&ptr.raw), - [expected] "r" (signed_expect), - [timeout] "r" (timeout), - ); - switch (result) { - 0 => {}, // ok - 1 => {}, // expected != loaded - 2 => recoverableOsBugDetected(), // timeout - else => recoverableOsBugDetected(), - } - } else switch (native_os) { - .linux => { - const linux = std.os.linux; - const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, null); - switch (linux.errno(rc)) { - .SUCCESS => {}, // notified by `wake()` - .INTR => {}, // caller's responsibility to repeat - .AGAIN => {}, // ptr.* != expect - 
.INVAL => {}, // possibly timeout overflow - .TIMEDOUT => recoverableOsBugDetected(), - .FAULT => recoverableOsBugDetected(), // ptr was invalid - else => recoverableOsBugDetected(), - } - }, - .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { - const c = std.c; - const flags: c.UL = .{ - .op = .COMPARE_AND_WAIT, - .NO_ERRNO = true, - }; - const status = if (darwin_supports_ulock_wait2) - c.__ulock_wait2(flags, ptr, expect, 0, 0) - else - c.__ulock_wait(flags, ptr, expect, 0); - - if (status >= 0) return; - - switch (@as(c.E, @enumFromInt(-status))) { - // Wait was interrupted by the OS or other spurious signalling. - .INTR => {}, - // Address of the futex was paged out. This is unlikely, but possible in theory, and - // pthread/libdispatch on darwin bother to handle it. In this case we'll return - // without waiting, but the caller should retry anyway. - .FAULT => {}, - .TIMEDOUT => recoverableOsBugDetected(), - else => recoverableOsBugDetected(), - } - }, - .windows => { - switch (windows.ntdll.RtlWaitOnAddress(ptr, &expect, @sizeOf(@TypeOf(expect)), null)) { - .SUCCESS, .CANCELLED => {}, - else => recoverableOsBugDetected(), - } - }, - .freebsd => { - const flags = @intFromEnum(std.c.UMTX_OP.WAIT_UINT_PRIVATE); - const rc = std.c._umtx_op(@intFromPtr(&ptr.raw), flags, @as(c_ulong, expect), 0, 0); - switch (posix.errno(rc)) { - .SUCCESS => {}, - .INTR => {}, // spurious wake - .FAULT => recoverableOsBugDetected(), // one of the args points to invalid memory - .INVAL => recoverableOsBugDetected(), // arguments should be correct - .TIMEDOUT => recoverableOsBugDetected(), // no timeout provided - else => recoverableOsBugDetected(), - } - }, - else => @compileError("unimplemented: futexWaitUncancelable"), - } -} - -pub fn futexWake(ptr: *const std.atomic.Value(u32), max_waiters: u32) void { - @branchHint(.cold); - - if (builtin.cpu.arch.isWasm()) { - comptime assert(builtin.cpu.has(.wasm, .atomics)); - assert(max_waiters != 0); - const woken_count = asm volatile ( - \\local.get %[ptr] - \\local.get %[waiters] - \\memory.atomic.notify 0 - \\local.set %[ret] - : [ret] "=r" (-> u32), - : [ptr] "r" (&ptr.raw), - [waiters] "r" (max_waiters), - ); - _ = woken_count; // can be 0 when linker flag 'shared-memory' is not enabled - } else switch (native_os) { - .linux => { - const linux = std.os.linux; - switch (linux.errno(linux.futex_3arg( - &ptr.raw, - .{ .cmd = .WAKE, .private = true }, - @min(max_waiters, std.math.maxInt(i32)), - ))) { - .SUCCESS => return, // successful wake up - .INVAL => return, // invalid futex_wait() on ptr done elsewhere - .FAULT => return, // pointer became invalid while doing the wake - else => return recoverableOsBugDetected(), // deadlock due to operating system bug - } - }, - .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { - const c = std.c; - const flags: c.UL = .{ - .op = .COMPARE_AND_WAIT, - .NO_ERRNO = true, - .WAKE_ALL = max_waiters > 1, - }; - while (true) { - const status = c.__ulock_wake(flags, ptr, 0); - if (status >= 0) return; - switch (@as(c.E, @enumFromInt(-status))) { - .INTR, .CANCELED => continue, // spurious wake() - .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t - .NOENT => return, // nothing was woken up - .ALREADY => unreachable, // only for UL.Op.WAKE_THREAD - else => unreachable, // deadlock due to operating system bug - } - } - }, - .windows => { - assert(max_waiters != 0); - switch (max_waiters) { - 1 => windows.ntdll.RtlWakeAddressSingle(ptr), - else 
=> windows.ntdll.RtlWakeAddressAll(ptr), - } - }, - .freebsd => { - const rc = std.c._umtx_op( - @intFromPtr(&ptr.raw), - @intFromEnum(std.c.UMTX_OP.WAKE_PRIVATE), - @as(c_ulong, max_waiters), - 0, // there is no timeout struct - 0, // there is no timeout struct pointer - ); - switch (posix.errno(rc)) { - .SUCCESS => {}, - .FAULT => {}, // it's ok if the ptr doesn't point to valid memory - .INVAL => unreachable, // arguments should be correct - else => unreachable, // deadlock due to operating system bug - } - }, - else => @compileError("unimplemented: futexWake"), - } -} - -/// A thread-safe logical boolean value which can be `set` and `unset`. -/// -/// It can also block threads until the value is set with cancelation via timed -/// waits. Statically initializable; four bytes on all targets. -pub const ResetEvent = switch (native_os) { - .illumos, .netbsd => ResetEventPosix, - else => ResetEventFutex, -}; - -/// A `ResetEvent` implementation based on futexes. -const ResetEventFutex = enum(u32) { - unset = 0, - waiting = 1, - is_set = 2, - - /// Returns whether the logical boolean is `set`. - /// - /// Once `reset` is called, this returns false until the next `set`. - /// - /// The memory accesses before the `set` can be said to happen before - /// `isSet` returns true. - pub fn isSet(ref: *const ResetEventFutex) bool { - if (builtin.single_threaded) return switch (ref.*) { - .unset => false, - .waiting => unreachable, - .is_set => true, - }; - // Acquire barrier ensures memory accesses before `set` happen before - // returning true. - return @atomicLoad(ResetEventFutex, ref, .acquire) == .is_set; - } - - /// Blocks the calling thread until `set` is called. - /// - /// This is effectively a more efficient version of `while (!isSet()) {}`. - /// - /// The memory accesses before the `set` can be said to happen before `wait` returns. - pub fn wait(ref: *ResetEventFutex, t: *Threaded) Io.Cancelable!void { - if (builtin.single_threaded) switch (ref.*) { - .unset => unreachable, // Deadlock, no other threads to wake us up. - .waiting => unreachable, // Invalid state. - .is_set => return, - }; - // Try to set the state from `unset` to `waiting` to indicate to the - // `set` thread that others are blocked on the ResetEventFutex. Avoid using - // any strict barriers until we know the ResetEventFutex is set. - var state = @atomicLoad(ResetEventFutex, ref, .acquire); - if (state == .is_set) { - @branchHint(.likely); - return; - } - if (state == .unset) { - state = @cmpxchgStrong(ResetEventFutex, ref, state, .waiting, .acquire, .acquire) orelse .waiting; - } - const current_thread = Thread.getCurrent(t); - while (state == .waiting) { - try futexWait(current_thread, @ptrCast(ref), @intFromEnum(ResetEventFutex.waiting)); - state = @atomicLoad(ResetEventFutex, ref, .acquire); - } - assert(state == .is_set); - } - - /// Same as `wait` except uninterruptible. - pub fn waitUncancelable(ref: *ResetEventFutex) void { - if (builtin.single_threaded) switch (ref.*) { - .unset => unreachable, // Deadlock, no other threads to wake us up. - .waiting => unreachable, // Invalid state. - .is_set => return, - }; - // Try to set the state from `unset` to `waiting` to indicate to the - // `set` thread that others are blocked on the ResetEventFutex. Avoid using - // any strict barriers until we know the ResetEventFutex is set. 
- var state = @atomicLoad(ResetEventFutex, ref, .acquire); - if (state == .is_set) { - @branchHint(.likely); - return; - } - if (state == .unset) { - state = @cmpxchgStrong(ResetEventFutex, ref, state, .waiting, .acquire, .acquire) orelse .waiting; - } - while (state == .waiting) { - futexWaitUncancelable(@ptrCast(ref), @intFromEnum(ResetEventFutex.waiting)); - state = @atomicLoad(ResetEventFutex, ref, .acquire); - } - assert(state == .is_set); - } - - /// Marks the logical boolean as `set` and unblocks any threads in `wait` - /// or `timedWait` to observe the new state. - /// - /// The logical boolean stays `set` until `reset` is called, making future - /// `set` calls do nothing semantically. - /// - /// The memory accesses before `set` can be said to happen before `isSet` - /// returns true or `wait`/`timedWait` return successfully. - pub fn set(ref: *ResetEventFutex) void { - if (builtin.single_threaded) { - ref.* = .is_set; - return; - } - if (@atomicRmw(ResetEventFutex, ref, .Xchg, .is_set, .release) == .waiting) { - futexWake(@ptrCast(ref), std.math.maxInt(u32)); - } - } - - /// Unmarks the ResetEventFutex as if `set` was never called. - /// - /// Assumes no threads are blocked in `wait` or `timedWait`. Concurrent - /// calls to `set`, `isSet` and `reset` are allowed. - pub fn reset(ref: *ResetEventFutex) void { - if (builtin.single_threaded) { - ref.* = .unset; - return; - } - @atomicStore(ResetEventFutex, ref, .unset, .monotonic); - } -}; - -/// A `ResetEvent` implementation based on pthreads API. -const ResetEventPosix = struct { - cond: std.c.pthread_cond_t, - mutex: std.c.pthread_mutex_t, - state: ResetEventFutex, - - pub const unset: ResetEventPosix = .{ - .cond = std.c.PTHREAD_COND_INITIALIZER, - .mutex = std.c.PTHREAD_MUTEX_INITIALIZER, - .state = .unset, - }; - - pub fn isSet(rep: *const ResetEventPosix) bool { - if (builtin.single_threaded) return switch (rep.state) { - .unset => false, - .waiting => unreachable, - .is_set => true, - }; - return @atomicLoad(ResetEventFutex, &rep.state, .acquire) == .is_set; - } - - pub fn wait(rep: *ResetEventPosix, t: *Threaded) Io.Cancelable!void { - if (builtin.single_threaded) switch (rep.*) { - .unset => unreachable, // Deadlock, no other threads to wake us up. - .waiting => unreachable, // Invalid state. - .is_set => return, - }; - const current_thread = Thread.getCurrent(t); - assert(std.c.pthread_mutex_lock(&rep.mutex) == .SUCCESS); - defer assert(std.c.pthread_mutex_unlock(&rep.mutex) == .SUCCESS); - sw: switch (rep.state) { - .unset => { - rep.state = .waiting; - continue :sw .waiting; - }, - .waiting => { - try current_thread.beginSyscall(); - assert(std.c.pthread_cond_wait(&rep.cond, &rep.mutex) == .SUCCESS); - current_thread.endSyscall(); - continue :sw rep.state; - }, - .is_set => return, - } - } - - pub fn waitUncancelable(rep: *ResetEventPosix) void { - if (builtin.single_threaded) switch (rep.*) { - .unset => unreachable, // Deadlock, no other threads to wake us up. - .waiting => unreachable, // Invalid state. 
- .is_set => return, - }; - assert(std.c.pthread_mutex_lock(&rep.mutex) == .SUCCESS); - defer assert(std.c.pthread_mutex_unlock(&rep.mutex) == .SUCCESS); - sw: switch (rep.state) { - .unset => { - rep.state = .waiting; - continue :sw .waiting; - }, - .waiting => { - assert(std.c.pthread_cond_wait(&rep.cond, &rep.mutex) == .SUCCESS); - continue :sw rep.state; - }, - .is_set => return, - } - } - - pub fn set(rep: *ResetEventPosix) void { - if (builtin.single_threaded) { - rep.* = .is_set; - return; - } - if (@atomicRmw(ResetEventFutex, &rep.state, .Xchg, .is_set, .release) == .waiting) { - assert(std.c.pthread_cond_broadcast(&rep.cond) == .SUCCESS); - } - } - - pub fn reset(rep: *ResetEventPosix) void { - if (builtin.single_threaded) { - rep.* = .unset; - return; - } - @atomicStore(ResetEventFutex, &rep.state, .unset, .monotonic); - } -}; - fn closeSocketWindows(s: ws2_32.SOCKET) void { const rc = ws2_32.closesocket(s); if (is_debug) switch (rc) { diff --git a/lib/std/Io/Threaded/test.zig b/lib/std/Io/Threaded/test.zig index 16afae7b63..9c54c3af1f 100644 --- a/lib/std/Io/Threaded/test.zig +++ b/lib/std/Io/Threaded/test.zig @@ -6,6 +6,11 @@ const testing = std.testing; const assert = std.debug.assert; test "concurrent vs main prevents deadlock via oversubscription" { + if (true) { + // https://codeberg.org/ziglang/zig/issues/30141 + return error.SkipZigTest; + } + var threaded: Io.Threaded = .init(std.testing.allocator); defer threaded.deinit(); const io = threaded.io(); @@ -34,6 +39,11 @@ fn get(io: Io, queue: *Io.Queue(u8)) void { } test "concurrent vs concurrent prevents deadlock via oversubscription" { + if (true) { + // https://codeberg.org/ziglang/zig/issues/30141 + return error.SkipZigTest; + } + var threaded: Io.Threaded = .init(std.testing.allocator); defer threaded.deinit(); const io = threaded.io(); diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 1b80b5b4f7..f49ef8eb67 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -1211,10 +1211,6 @@ pub fn printValue( } const is_any = comptime std.mem.eql(u8, fmt, ANY); - if (!is_any and std.meta.hasMethod(T, "format") and fmt.len == 0) { - // after 0.15.0 is tagged, delete this compile error and its condition - @compileError("ambiguous format string; specify {f} to call format method, or {any} to skip it"); - } switch (@typeInfo(T)) { .float, .comptime_float => { @@ -1702,7 +1698,7 @@ pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precisi try w.writeAll("0x"); try w.writeByte(buf[0]); - const trimmed = std.mem.trimRight(u8, buf[1..], "0"); + const trimmed = std.mem.trimEnd(u8, buf[1..], "0"); if (opt_precision) |precision| { if (precision > 0) try w.writeAll("."); } else if (trimmed.len > 0) { diff --git a/lib/std/Io/net/HostName.zig b/lib/std/Io/net/HostName.zig index e2638abfaa..628a97d1f8 100644 --- a/lib/std/Io/net/HostName.zig +++ b/lib/std/Io/net/HostName.zig @@ -82,19 +82,22 @@ pub const LookupError = error{ pub const LookupResult = union(enum) { address: IpAddress, canonical_name: HostName, - end: LookupError!void, }; -/// Adds any number of `IpAddress` into resolved, exactly one canonical_name, -/// and then always finishes by adding one `LookupResult.end` entry. +/// Adds any number of `LookupResult.address` into `resolved`, and exactly one +/// `LookupResult.canonical_name`. /// /// Guaranteed not to block if provided queue has capacity at least 16. +/// +/// Closes `resolved` before return, even on error. +/// +/// Asserts `resolved` is not closed until this call returns. 
pub fn lookup( host_name: HostName, io: Io, resolved: *Io.Queue(LookupResult), options: LookupOptions, -) void { +) LookupError!void { return io.vtable.netLookup(io.userdata, host_name, resolved, options); } @@ -211,23 +214,25 @@ pub fn connect( port: u16, options: IpAddress.ConnectOptions, ) ConnectError!Stream { - var connect_many_buffer: [32]ConnectManyResult = undefined; - var connect_many_queue: Io.Queue(ConnectManyResult) = .init(&connect_many_buffer); + var connect_many_buffer: [32]IpAddress.ConnectError!Stream = undefined; + var connect_many_queue: Io.Queue(IpAddress.ConnectError!Stream) = .init(&connect_many_buffer); var connect_many = io.async(connectMany, .{ host_name, io, port, &connect_many_queue, options }); - var saw_end = false; defer { - connect_many.cancel(io); - if (!saw_end) while (true) switch (connect_many_queue.getOneUncancelable(io)) { - .connection => |loser| if (loser) |s| s.close(io) else |_| continue, - .end => break, - }; + connect_many.cancel(io) catch {}; + while (connect_many_queue.getOneUncancelable(io)) |loser| { + if (loser) |s| s.close(io) else |_| {} + } else |err| switch (err) { + error.Closed => {}, + } } - var aggregate_error: ConnectError = error.UnknownHostName; + var ip_connect_error: ?IpAddress.ConnectError = null; - while (connect_many_queue.getOne(io)) |result| switch (result) { - .connection => |connection| if (connection) |stream| return stream else |err| switch (err) { + while (connect_many_queue.getOne(io)) |result| { + if (result) |stream| { + return stream; + } else |err| switch (err) { error.SystemResources, error.OptionUnsupported, error.ProcessFdQuotaExceeded, @@ -237,66 +242,80 @@ pub fn connect( error.WouldBlock => return error.Unexpected, - else => |e| aggregate_error = e, - }, - .end => |end| { - saw_end = true; - try end; - return aggregate_error; - }, + else => |e| ip_connect_error = e, + } } else |err| switch (err) { error.Canceled => |e| return e, + error.Closed => { + // There was no successful connection attempt. If there was a lookup error, return that. + try connect_many.await(io); + // Otherwise, return the error from a failed IP connection attempt. + return ip_connect_error orelse + return error.UnknownHostName; + }, } } -pub const ConnectManyResult = union(enum) { - connection: IpAddress.ConnectError!Stream, - end: ConnectError!void, -}; - /// Asynchronously establishes a connection to all IP addresses associated with /// a host name, adding them to a results queue upon completion. +/// +/// Closes `results` before return, even on error. +/// +/// Asserts `results` is not closed until this call returns. 
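// --- Illustrative aside, not part of the diff above ---
// With the queue-closing protocol described above, a consumer drains the results queue until it
// sees `error.Closed`, then awaits the `lookup` future itself to surface any `LookupError`. The
// sketch below is a fragment: `io` and `host` are assumed to be in scope, the port and buffer
// sizes are arbitrary, and the enclosing function is assumed to return an error union.
var canonical_name_buffer: [HostName.max_len]u8 = undefined;
var results_buffer: [16]HostName.LookupResult = undefined;
var results: Io.Queue(HostName.LookupResult) = .init(&results_buffer);

var lookup_future = io.async(HostName.lookup, .{ host, io, &results, .{
    .port = 443,
    .canonical_name_buffer = &canonical_name_buffer,
} });
defer lookup_future.cancel(io) catch {};

var addresses_found: usize = 0;
while (results.getOne(io)) |result| switch (result) {
    .address => addresses_found += 1,
    .canonical_name => |canonical| std.log.debug("canonical name: {s}", .{canonical.bytes}),
} else |err| switch (err) {
    error.Closed => {}, // producer finished and the queue is drained
    error.Canceled => |e| return e,
}
try lookup_future.await(io); // reports any LookupError from the resolver
// --- End of aside ---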
pub fn connectMany( host_name: HostName, io: Io, port: u16, - results: *Io.Queue(ConnectManyResult), + results: *Io.Queue(IpAddress.ConnectError!Stream), options: IpAddress.ConnectOptions, -) void { +) LookupError!void { + defer results.close(io); + var canonical_name_buffer: [max_len]u8 = undefined; var lookup_buffer: [32]HostName.LookupResult = undefined; var lookup_queue: Io.Queue(LookupResult) = .init(&lookup_buffer); - var group: Io.Group = .init; - defer group.cancel(io); - - group.async(io, lookup, .{ host_name, io, &lookup_queue, .{ + var lookup_future = io.async(lookup, .{ host_name, io, &lookup_queue, .{ .port = port, .canonical_name_buffer = &canonical_name_buffer, } }); + defer lookup_future.cancel(io) catch {}; + + var group: Io.Group = .init; + defer group.cancel(io); while (lookup_queue.getOne(io)) |dns_result| switch (dns_result) { .address => |address| group.async(io, enqueueConnection, .{ address, io, results, options }), .canonical_name => continue, - .end => |lookup_result| { - group.wait(io); - results.putOneUncancelable(io, .{ .end = lookup_result }); - return; - }, } else |err| switch (err) { - error.Canceled => |e| { - group.cancel(io); - results.putOneUncancelable(io, .{ .end = e }); + error.Canceled => |e| return e, + error.Closed => { + group.wait(io); + return lookup_future.await(io); }, } } - fn enqueueConnection( address: IpAddress, io: Io, - queue: *Io.Queue(ConnectManyResult), + queue: *Io.Queue(IpAddress.ConnectError!Stream), options: IpAddress.ConnectOptions, ) void { - queue.putOneUncancelable(io, .{ .connection = address.connect(io, options) }); + enqueueConnectionFallible(address, io, queue, options) catch |err| switch (err) { + error.Canceled => {}, + }; +} +fn enqueueConnectionFallible( + address: IpAddress, + io: Io, + queue: *Io.Queue(IpAddress.ConnectError!Stream), + options: IpAddress.ConnectOptions, +) Io.Cancelable!void { + const result = address.connect(io, options); + errdefer if (result) |s| s.close(io) else |_| {}; + queue.putOne(io, result) catch |err| switch (err) { + error.Closed => unreachable, // `queue` must not be closed + error.Canceled => |e| return e, + }; } pub const ResolvConf = struct { diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index 2a7a151b5d..e234a9edde 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -129,7 +129,7 @@ test "resolve DNS" { var results_buffer: [32]net.HostName.LookupResult = undefined; var results: Io.Queue(net.HostName.LookupResult) = .init(&results_buffer); - net.HostName.lookup(try .init("localhost"), io, &results, .{ + try net.HostName.lookup(try .init("localhost"), io, &results, .{ .port = 80, .canonical_name_buffer = &canonical_name_buffer, }); @@ -142,11 +142,10 @@ test "resolve DNS" { addresses_found += 1; }, .canonical_name => |canonical_name| try testing.expectEqualStrings("localhost", canonical_name.bytes), - .end => |end| { - try end; - break; - }, - } else |err| return err; + } else |err| switch (err) { + error.Closed => {}, + error.Canceled => |e| return e, + } try testing.expect(addresses_found != 0); } @@ -161,20 +160,19 @@ test "resolve DNS" { net.HostName.lookup(try .init("example.com"), io, &results, .{ .port = 80, .canonical_name_buffer = &canonical_name_buffer, - }); + }) catch |err| switch (err) { + error.UnknownHostName => return error.SkipZigTest, + error.NameServerFailure => return error.SkipZigTest, + else => |e| return e, + }; while (results.getOne(io)) |result| switch (result) { .address => {}, .canonical_name => {}, - .end => |end| { - end 
catch |err| switch (err) { - error.UnknownHostName => return error.SkipZigTest, - error.NameServerFailure => return error.SkipZigTest, - else => return err, - }; - break; - }, - } else |err| return err; + } else |err| switch (err) { + error.Closed => {}, + error.Canceled => |e| return e, + } } } diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 0ac6c333c9..f7965ed14e 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -209,10 +209,10 @@ test "select" { return; }, }; - defer if (get_a.cancel(io)) |_| {} else |_| @panic("fail"); + defer _ = get_a.cancel(io) catch {}; var get_b = try io.concurrent(Io.Queue(u8).getOne, .{ &queue, io }); - defer if (get_b.cancel(io)) |_| {} else |_| @panic("fail"); + defer _ = get_b.cancel(io) catch {}; var timeout = io.async(Io.sleep, .{ io, .fromMilliseconds(1), .awake }); defer timeout.cancel(io) catch {}; @@ -225,12 +225,9 @@ test "select" { .get_a => return error.TestFailure, .get_b => return error.TestFailure, .timeout => { - // Unblock the queues to avoid making this unit test depend on - // cancellation. - queue.putOneUncancelable(io, 1); - queue.putOneUncancelable(io, 1); - try testing.expectEqual(1, try get_a.await(io)); - try testing.expectEqual(1, try get_b.await(io)); + queue.close(io); + try testing.expectError(error.Closed, get_a.await(io)); + try testing.expectError(error.Closed, get_b.await(io)); }, } } @@ -255,3 +252,162 @@ test "Queue" { try testQueue(4); try testQueue(5); } + +test "Queue.close single-threaded" { + const io = std.testing.io; + + var buf: [10]u8 = undefined; + var queue: Io.Queue(u8) = .init(&buf); + + try queue.putAll(io, &.{ 0, 1, 2, 3, 4, 5, 6 }); + try expectEqual(3, try queue.put(io, &.{ 7, 8, 9, 10 }, 0)); // there is capacity for 3 more items + + var get_buf: [4]u8 = undefined; + + // Receive some elements before closing + try expectEqual(4, try queue.get(io, &get_buf, 0)); + try expectEqual(0, get_buf[0]); + try expectEqual(1, get_buf[1]); + try expectEqual(2, get_buf[2]); + try expectEqual(3, get_buf[3]); + try expectEqual(4, try queue.getOne(io)); + + // ...and add a couple more now there's space + try queue.putAll(io, &.{ 20, 21 }); + + queue.close(io); + + // Receive more elements *after* closing + try expectEqual(4, try queue.get(io, &get_buf, 0)); + try expectEqual(5, get_buf[0]); + try expectEqual(6, get_buf[1]); + try expectEqual(7, get_buf[2]); + try expectEqual(8, get_buf[3]); + try expectEqual(9, try queue.getOne(io)); + + // Cannot put anything while closed, even if the buffer has space + try expectError(error.Closed, queue.putOne(io, 100)); + try expectError(error.Closed, queue.putAll(io, &.{ 101, 102 })); + try expectError(error.Closed, queue.putUncancelable(io, &.{ 103, 104 }, 0)); + + // Even if we ask for 3 items, the queue is closed, so we only get the last 2 + try expectEqual(2, try queue.get(io, &get_buf, 4)); + try expectEqual(20, get_buf[0]); + try expectEqual(21, get_buf[1]); + + // The queue is now empty, so `get` should return `error.Closed` too + try expectError(error.Closed, queue.getOne(io)); + try expectError(error.Closed, queue.get(io, &get_buf, 0)); + try expectError(error.Closed, queue.putUncancelable(io, &get_buf, 2)); +} + +test "Event" { + const global = struct { + fn waitAndRead(io: Io, event: *Io.Event, ptr: *const u32) Io.Cancelable!u32 { + try event.wait(io); + return ptr.*; + } + }; + + const io = std.testing.io; + + var event: Io.Event = .unset; + var buffer: u32 = undefined; + + { + var future = io.concurrent(global.waitAndRead, .{ io, &event, &buffer }) 
catch |err| switch (err) { + error.ConcurrencyUnavailable => return error.SkipZigTest, + }; + + buffer = 123; + event.set(io); + + const result = try future.await(io); + + try std.testing.expectEqual(123, result); + } + + event.reset(); + + { + var future = io.concurrent(global.waitAndRead, .{ io, &event, &buffer }) catch |err| switch (err) { + error.ConcurrencyUnavailable => return error.SkipZigTest, + }; + try std.testing.expectError(error.Canceled, future.cancel(io)); + } +} + +test "recancel" { + const global = struct { + fn worker(io: Io) Io.Cancelable!void { + var dummy_event: Io.Event = .unset; + + if (dummy_event.wait(io)) { + return; + } else |err| switch (err) { + error.Canceled => io.recancel(), + } + + // Now we expect to see `error.Canceled` again. + return dummy_event.wait(io); + } + }; + + const io = std.testing.io; + var future = io.concurrent(global.worker, .{io}) catch |err| switch (err) { + error.ConcurrencyUnavailable => return error.SkipZigTest, + }; + if (future.cancel(io)) { + return error.UnexpectedSuccess; // both `wait` calls should have returned `error.Canceled` + } else |err| switch (err) { + error.Canceled => {}, + } +} + +test "swapCancelProtection" { + const global = struct { + fn waitTwice( + io: Io, + event: *Io.Event, + ) error{ Canceled, CanceledWhileProtected }!void { + // Wait for `event` while protected from cancelation. + { + const old_prot = io.swapCancelProtection(.blocked); + defer _ = io.swapCancelProtection(old_prot); + event.wait(io) catch |err| switch (err) { + error.Canceled => return error.CanceledWhileProtected, + }; + } + // Reset the event (it will never be set again), and this time wait for it without protection. + event.reset(); + _ = try event.wait(io); + } + fn sleepThenSet(io: Io, event: *Io.Event) !void { + // Give `waitTwice` a chance to get canceled. + try io.sleep(.fromMilliseconds(200), .awake); + event.set(io); + } + }; + + const io = std.testing.io; + + var event: Io.Event = .unset; + + var wait_future = io.concurrent(global.waitTwice, .{ io, &event }) catch |err| switch (err) { + error.ConcurrencyUnavailable => return error.SkipZigTest, + }; + defer wait_future.cancel(io) catch {}; + + var set_future = try io.concurrent(global.sleepThenSet, .{ io, &event }); + defer set_future.cancel(io) catch {}; + + if (wait_future.cancel(io)) { + return error.UnexpectedSuccess; // there was no `set` call to unblock the second `wait` + } else |err| switch (err) { + error.Canceled => {}, + error.CanceledWhileProtected => |e| return e, + } + + // Because it reached the `set`, it should be too late for `sleepThenSet` to see `error.Canceled`. + try set_future.cancel(io); +} diff --git a/lib/std/Io/tty.zig b/lib/std/Io/tty.zig index 4f5ec8fd2d..3e2bb0969d 100644 --- a/lib/std/Io/tty.zig +++ b/lib/std/Io/tty.zig @@ -5,11 +5,6 @@ const process = std.process; const windows = std.os.windows; const native_os = builtin.os.tag; -/// Deprecated in favor of `Config.detect`. 
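For migration, the replacement is a direct call to the decl literal (as the `debug_allocator.zig` hunks below also show); a minimal sketch, assuming `w` is a `*std.Io.Writer`:

const tty_config: std.Io.tty.Config = .detect(.stderr());
try tty_config.setColor(w, .dim);
try w.writeAll("dimmed if the terminal supports it\n");
try tty_config.setColor(w, .reset);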
-pub fn detectConfig(file: File) Config { - return .detect(file); -} - pub const Color = enum { black, red, diff --git a/lib/std/Target.zig b/lib/std/Target.zig index b37902d228..29d5a66489 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -510,7 +510,7 @@ pub const Os = struct { break :blk default_min; }, - .max = .{ .major = 14, .minor = 3, .patch = 0 }, + .max = .{ .major = 15, .minor = 0, .patch = 0 }, }, }, .netbsd => .{ @@ -865,7 +865,6 @@ pub const Abi = enum { }, .freebsd => switch (arch) { .arm, - .powerpc, => .eabihf, else => .none, }, @@ -2724,7 +2723,6 @@ pub const DynamicLinker = struct { .powerpc64, .riscv64, .sh, - .sheb, .sparc64, .x86, .x86_64, diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 93563bb245..f0fddd7ed0 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -226,7 +226,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void { switch (windows.ntdll.NtSetInformationThread( self.getHandle(), - .ThreadNameInformation, + .NameInformation, &unicode_string, @sizeOf(windows.UNICODE_STRING), )) { @@ -338,7 +338,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co switch (windows.ntdll.NtQueryInformationThread( self.getHandle(), - .ThreadNameInformation, + .NameInformation, &buf, buf_capacity, null, @@ -521,12 +521,10 @@ pub const YieldError = error{ /// Yields the current thread potentially allowing other threads to run. pub fn yield() YieldError!void { - if (native_os == .windows) { - // The return value has to do with how many other threads there are; it is not - // an error condition on Windows. - _ = windows.kernel32.SwitchToThread(); - return; - } + if (native_os == .windows) switch (windows.ntdll.NtYieldExecution()) { + .SUCCESS, .NO_YIELD_PERFORMED => return, + else => return error.SystemCannotYield, + }; switch (posix.errno(posix.system.sched_yield())) { .SUCCESS => return, .NOSYS => return error.SystemCannotYield, @@ -647,11 +645,11 @@ const WindowsThreadImpl = struct { const ThreadCompletion = struct { completion: Completion, heap_ptr: windows.PVOID, - heap_handle: windows.HANDLE, + heap_handle: *windows.HEAP, thread_handle: windows.HANDLE = undefined, fn free(self: ThreadCompletion) void { - const status = windows.kernel32.HeapFree(self.heap_handle, 0, self.heap_ptr); + const status = windows.ntdll.RtlFreeHeap(self.heap_handle, .{}, self.heap_ptr); assert(status != 0); } }; @@ -673,10 +671,10 @@ const WindowsThreadImpl = struct { } }; - const heap_handle = windows.kernel32.GetProcessHeap() orelse return error.OutOfMemory; + const heap_handle = windows.GetProcessHeap() orelse return error.OutOfMemory; const alloc_bytes = @alignOf(Instance) + @sizeOf(Instance); - const alloc_ptr = windows.ntdll.RtlAllocateHeap(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory; - errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); + const alloc_ptr = windows.ntdll.RtlAllocateHeap(heap_handle, .{}, alloc_bytes) orelse return error.OutOfMemory; + errdefer assert(windows.ntdll.RtlFreeHeap(heap_handle, .{}, alloc_ptr) != 0); const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes]; var fba = std.heap.FixedBufferAllocator.init(instance_bytes); @@ -693,8 +691,7 @@ const WindowsThreadImpl = struct { // Windows appears to only support SYSTEM_INFO.dwAllocationGranularity minimum stack size. // Going lower makes it default to that specified in the executable (~1mb). // Its also fine if the limit here is incorrect as stack size is only a hint. 
- var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32); - stack_size = @max(64 * 1024, stack_size); + const stack_size = @max(64 * 1024, std.math.lossyCast(u32, config.stack_size)); instance.thread.thread_handle = windows.kernel32.CreateThread( null, diff --git a/lib/std/c.zig b/lib/std/c.zig index f3fdeb6d95..656bdc35bb 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -4682,14 +4682,6 @@ pub const siginfo_t = switch (native_os) { }, __pad: [128 - 3 * @sizeOf(c_int)]u8, }, - - comptime { - if (@sizeOf(usize) == 4) - assert(@sizeOf(@This()) == 128) - else - // Take into account the padding between errno and data fields. - assert(@sizeOf(@This()) == 136); - } }, // https://github.com/SerenityOS/serenity/blob/ec492a1a0819e6239ea44156825c4ee7234ca3db/Kernel/API/POSIX/signal.h#L27-L37 .serenity => extern struct { @@ -7586,166 +7578,6 @@ pub const EAI = if (builtin.abi.isAndroid()) enum(c_int) { pub const dl_iterate_phdr_callback = *const fn (info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.c) c_int; pub const Stat = switch (native_os) { - .linux => switch (native_arch) { - .sparc64 => extern struct { - dev: u64, - __pad1: u16, - ino: ino_t, - mode: u32, - nlink: u32, - - uid: u32, - gid: u32, - rdev: u64, - __pad2: u16, - - size: off_t, - blksize: isize, - blocks: i64, - - atim: timespec, - mtim: timespec, - ctim: timespec, - __reserved: [2]usize, - - pub fn atime(self: @This()) timespec { - return self.atim; - } - - pub fn mtime(self: @This()) timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) timespec { - return self.ctim; - } - }, - .mips, .mipsel => if (builtin.target.abi.isMusl()) extern struct { - dev: dev_t, - __pad0: [2]i32, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: uid_t, - gid: gid_t, - rdev: dev_t, - __pad1: [2]i32, - size: off_t, - atim: timespec, - mtim: timespec, - ctim: timespec, - blksize: blksize_t, - __pad3: i32, - blocks: blkcnt_t, - __pad4: [14]i32, - - pub fn atime(self: @This()) timespec { - return self.atim; - } - - pub fn mtime(self: @This()) timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) timespec { - return self.ctim; - } - } else extern struct { - dev: u32, - __pad0: [3]u32, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: uid_t, - gid: gid_t, - rdev: u32, - __pad1: [3]u32, - size: off_t, - atim: timespec, - mtim: timespec, - ctim: timespec, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - __pad4: [14]u32, - - pub fn atime(self: @This()) timespec { - return self.atim; - } - - pub fn mtime(self: @This()) timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) timespec { - return self.ctim; - } - }, - .mips64, .mips64el => if (builtin.target.abi.isMusl()) extern struct { - dev: dev_t, - __pad0: [3]i32, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: uid_t, - gid: gid_t, - rdev: dev_t, - __pad1: [2]u32, - size: off_t, - __pad2: i32, - atim: timespec, - mtim: timespec, - ctim: timespec, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - __pad4: [14]i32, - - pub fn atime(self: @This()) timespec { - return self.atim; - } - - pub fn mtime(self: @This()) timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) timespec { - return self.ctim; - } - } else extern struct { - dev: dev_t, - __pad0: [3]u32, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: uid_t, - gid: gid_t, - rdev: dev_t, - __pad1: [3]u32, - size: off_t, - atim: timespec, - mtim: timespec, - ctim: timespec, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - 
__pad4: [14]i32, - - pub fn atime(self: @This()) timespec { - return self.atim; - } - - pub fn mtime(self: @This()) timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) timespec { - return self.ctim; - } - }, - - else => std.os.linux.Stat, // libc stat is the same as kernel stat. - }, .emscripten => emscripten.Stat, .wasi => extern struct { // Match wasi-libc's `struct stat` in lib/libc/include/wasm-wasi-musl/__struct_stat.h @@ -10422,6 +10254,7 @@ pub const fstat = switch (native_os) { else => private.fstat, }, .netbsd => private.__fstat50, + .linux => {}, else => private.fstat, }; @@ -10430,8 +10263,12 @@ pub const fstatat = switch (native_os) { .x86_64 => private.@"fstatat$INODE64", else => private.fstatat, }, + .linux => {}, else => private.fstatat, }; + +pub extern "c" fn statx(dirfd: fd_t, path: [*:0]const u8, flags: u32, mask: linux.STATX, buf: *linux.Statx) c_int; + pub extern "c" fn getpwent() ?*passwd; pub extern "c" fn endpwent() void; pub extern "c" fn setpwent() void; @@ -10505,8 +10342,6 @@ pub extern "c" fn inotify_init1(flags: c_uint) c_int; pub extern "c" fn inotify_add_watch(fd: fd_t, pathname: [*:0]const u8, mask: u32) c_int; pub extern "c" fn inotify_rm_watch(fd: fd_t, wd: c_int) c_int; -pub extern "c" fn fstat64(fd: fd_t, buf: *Stat) c_int; -pub extern "c" fn fstatat64(dirfd: fd_t, noalias path: [*:0]const u8, noalias stat_buf: *Stat, flags: u32) c_int; pub extern "c" fn fallocate64(fd: fd_t, mode: c_int, offset: off_t, len: off_t) c_int; pub extern "c" fn fopen64(noalias filename: [*:0]const u8, noalias modes: [*:0]const u8) ?*FILE; pub extern "c" fn ftruncate64(fd: c_int, length: off_t) c_int; @@ -10633,24 +10468,36 @@ pub const sigaction = switch (native_os) { }; /// Zig's version of SIGRTMIN. Actually a function. -pub fn sigrtmin() u8 { - return switch (native_os) { - .freebsd => 65, - .netbsd => 33, - .illumos => @truncate(sysconf(@intFromEnum(_SC.SIGRT_MIN))), - else => @truncate(@as(c_uint, @bitCast(private.__libc_current_sigrtmin()))), - }; -} +pub const sigrtmin = switch (native_os) { + .openbsd => {}, + else => sigrt_private.sigrtmin, +}; /// Zig's version of SIGRTMAX. Actually a function. 
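On every target except OpenBSD (where the declaration is now `{}` and therefore cannot be called), usage is unchanged; a small sketch using the FreeBSD constants from the code below:

const first_rt = std.c.sigrtmin(); // 65 on FreeBSD
const last_rt = std.c.sigrtmax(); // 126 on FreeBSD
const rt_signal_count = last_rt - first_rt + 1; // 62 real-time signals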
-pub fn sigrtmax() u8 { - return switch (native_os) { - .freebsd => 126, - .netbsd => 63, - .illumos => @truncate(sysconf(@intFromEnum(_SC.SIGRT_MAX))), - else => @truncate(@as(c_uint, @bitCast(private.__libc_current_sigrtmax()))), - }; -} +pub const sigrtmax = switch (native_os) { + .openbsd => {}, + else => sigrt_private.sigrtmax, +}; + +const sigrt_private = struct { + pub fn sigrtmin() u8 { + return switch (native_os) { + .freebsd => 65, + .netbsd => 33, + .illumos => @truncate(sysconf(@intFromEnum(_SC.SIGRT_MIN))), + else => @truncate(@as(c_uint, @bitCast(private.__libc_current_sigrtmin()))), + }; + } + + pub fn sigrtmax() u8 { + return switch (native_os) { + .freebsd => 126, + .netbsd => 63, + .illumos => @truncate(sysconf(@intFromEnum(_SC.SIGRT_MAX))), + else => @truncate(@as(c_uint, @bitCast(private.__libc_current_sigrtmax()))), + }; + } +}; pub const sigfillset = switch (native_os) { .netbsd => private.__sigfillset14, @@ -11324,6 +11171,8 @@ pub const _dyld_get_image_header = darwin._dyld_get_image_header; pub const _dyld_get_image_name = darwin._dyld_get_image_name; pub const _dyld_get_image_vmaddr_slide = darwin._dyld_get_image_vmaddr_slide; pub const _dyld_image_count = darwin._dyld_image_count; +pub const _dyld_get_image_header_containing_address = darwin._dyld_get_image_header_containing_address; +pub const dyld_image_path_containing_address = darwin.dyld_image_path_containing_address; pub const _host_page_size = darwin._host_page_size; pub const boolean_t = darwin.boolean_t; pub const clock_get_time = darwin.clock_get_time; @@ -11550,7 +11399,6 @@ const private = struct { extern "c" fn sigprocmask(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int; extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int; extern "c" fn socketpair(domain: c_uint, sock_type: c_uint, protocol: c_uint, sv: *[2]fd_t) c_int; - extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *Stat) c_int; extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int; extern "c" fn sysconf(sc: c_int) c_long; extern "c" fn shm_open(name: [*:0]const u8, flag: c_int, mode: mode_t) c_int; diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig index cf7d3127eb..f0c4f4c278 100644 --- a/lib/std/c/darwin.zig +++ b/lib/std/c/darwin.zig @@ -354,6 +354,8 @@ pub extern "c" fn _dyld_image_count() u32; pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header; pub extern "c" fn _dyld_get_image_vmaddr_slide(image_index: u32) usize; pub extern "c" fn _dyld_get_image_name(image_index: u32) [*:0]const u8; +pub extern "c" fn _dyld_get_image_header_containing_address(address: *const anyopaque) ?*mach_header; +pub extern "c" fn dyld_image_path_containing_address(address: *const anyopaque) ?[*:0]const u8; pub extern "c" fn dladdr(addr: *const anyopaque, info: *dl_info) c_int; pub const dl_info = extern struct { diff --git a/lib/std/crypto/ml_kem.zig b/lib/std/crypto/ml_kem.zig index 9badac5d79..62e21f60a9 100644 --- a/lib/std/crypto/ml_kem.zig +++ b/lib/std/crypto/ml_kem.zig @@ -329,17 +329,19 @@ fn Kyber(comptime p: Params) type { // ct' = innerEnc(pk, m', r') const ct2 = sk.pk.encrypt(&m2, kr2[32..64]); - // Compute H(ct) and put in the second slot of kr2 which will be (K'', H(ct)). - sha3.Sha3_256.hash(ct, kr2[32..], .{}); - - // Replace K'' by z in the first slot of kr2 if ct ≠ct'. 
- cmov(32, kr2[0..32], sk.z, ctneq(ciphertext_length, ct.*, ct2)); - if (p.ml_kem) { - // ML-KEM: K = K''/z + // ML-KEM: K = K'' if ct == ct', else K = J(z || c) per FIPS 203 + var k_bar: [shared_length]u8 = undefined; + var j = sha3.Shake256.init(.{}); + j.update(&sk.z); + j.update(ct); + j.squeeze(&k_bar); + cmov(shared_length, kr2[0..shared_length], k_bar, ctneq(ciphertext_length, ct.*, ct2)); return kr2[0..shared_length].*; } else { // Kyber: K = KDF(K''/z ‖ H(c)) + sha3.Sha3_256.hash(ct, kr2[32..], .{}); + cmov(32, kr2[0..32], sk.z, ctneq(ciphertext_length, ct.*, ct2)); var ss: [shared_length]u8 = undefined; sha3.Shake256.hash(&kr2, &ss, .{}); return ss; diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig index d32f15f57b..86eb9f0440 100644 --- a/lib/std/crypto/sha2.zig +++ b/lib/std/crypto/sha2.zig @@ -211,7 +211,7 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type { s_v[k] = asm ( \\sha256su0.4s %[w0_3], %[w4_7] \\sha256su1.4s %[w0_3], %[w8_11], %[w12_15] - : [w0_3] "=w" (-> V4u32), + : [w0_3] "=&w" (-> V4u32), : [_] "0" (s_v[k - 4]), [w4_7] "w" (s_v[k - 3]), [w8_11] "w" (s_v[k - 2]), diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index f6e334af8e..d3a06819bf 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -1158,7 +1158,7 @@ fn readIndirect(c: *Client) Reader.Error!usize { P.AEAD.decrypt(cleartext, ciphertext, auth_tag, ad, nonce, pv.server_key) catch return failRead(c, error.TlsBadRecordMac); // TODO use scalar, non-slice version - const msg = mem.trimRight(u8, cleartext, "\x00"); + const msg = mem.trimEnd(u8, cleartext, "\x00"); break :cleartext .{ msg.len - 1, @enumFromInt(msg[msg.len - 1]) }; }, .tls_1_2 => { diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 355415d311..feea5f9a41 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -329,16 +329,16 @@ pub fn dumpHex(bytes: []const u8) void { } /// Prints a hexadecimal view of the bytes, returning any error that occurs. -pub fn dumpHexFallible(bw: *Writer, ttyconf: tty.Config, bytes: []const u8) !void { +pub fn dumpHexFallible(bw: *Writer, tty_config: tty.Config, bytes: []const u8) !void { var chunks = mem.window(u8, bytes, 16, 16); while (chunks.next()) |window| { // 1. Print the address. const address = (@intFromPtr(bytes.ptr) + 0x10 * (std.math.divCeil(usize, chunks.index orelse bytes.len, 16) catch unreachable)) - 0x10; - try ttyconf.setColor(bw, .dim); + try tty_config.setColor(bw, .dim); // We print the address in lowercase and the bytes in uppercase hexadecimal to distinguish them more. // Also, make sure all lines are aligned by padding the address. try bw.print("{x:0>[1]} ", .{ address, @sizeOf(usize) * 2 }); - try ttyconf.setColor(bw, .reset); + try tty_config.setColor(bw, .reset); // 2. Print the bytes. for (window, 0..) |byte, index| { @@ -358,7 +358,7 @@ pub fn dumpHexFallible(bw: *Writer, ttyconf: tty.Config, bytes: []const u8) !voi try bw.writeByte(byte); } else { // Related: https://github.com/ziglang/zig/issues/7600 - if (ttyconf == .windows_api) { + if (tty_config == .windows_api) { try bw.writeByte('.'); continue; } @@ -1604,6 +1604,13 @@ pub fn dumpStackPointerAddr(prefix: []const u8) void { test "manage resources correctly" { if (SelfInfo == void) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_c) { + // The C backend emits an extremely large C source file, meaning it has a huge + // amount of debug information. Parsing this debug information makes this test + // take too long to be worth running. 
+ return error.SkipZigTest; + } + const S = struct { noinline fn showMyTrace() usize { return @returnAddress(); diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 94d50bbf77..dd11b4c8bf 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -78,9 +78,14 @@ pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!st }; } pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 { - const module = try si.findModule(gpa, address); - defer si.mutex.unlock(); - return module.name; + _ = si; + _ = gpa; + // This function is marked as deprecated; however, it is significantly more + // performant than `dladdr` (since the latter also does a very slow symbol + // lookup), so let's use it since it's still available. + return std.mem.span(std.c.dyld_image_path_containing_address( + @ptrFromInt(address), + ) orelse return error.MissingDebugInfo); } pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize { const module = try si.findModule(gpa, address); @@ -426,28 +431,26 @@ fn unwindFrameInner(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) !usi /// Acquires the mutex on success. fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) Error!*Module { - var info: std.c.dl_info = undefined; - if (std.c.dladdr(@ptrFromInt(address), &info) == 0) { - return error.MissingDebugInfo; - } + // This function is marked as deprecated; however, it is significantly more + // performant than `dladdr` (since the latter also does a very slow symbol + // lookup), so let's use it since it's still available. + const text_base = std.c._dyld_get_image_header_containing_address( + @ptrFromInt(address), + ) orelse return error.MissingDebugInfo; si.mutex.lock(); errdefer si.mutex.unlock(); - const gop = try si.modules.getOrPutAdapted(gpa, @intFromPtr(info.fbase), Module.Adapter{}); + const gop = try si.modules.getOrPutAdapted(gpa, @intFromPtr(text_base), Module.Adapter{}); errdefer comptime unreachable; - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .text_base = @intFromPtr(info.fbase), - .name = std.mem.span(info.fname), - .unwind = null, - .file = null, - }; - } + if (!gop.found_existing) gop.key_ptr.* = .{ + .text_base = @intFromPtr(text_base), + .unwind = null, + .file = null, + }; return gop.key_ptr; } const Module = struct { text_base: usize, - name: []const u8, unwind: ?(Error!Unwind), file: ?(Error!MachOFile), @@ -544,10 +547,15 @@ const Module = struct { } fn getFile(module: *Module, gpa: Allocator) Error!*MachOFile { - if (module.file == null) module.file = MachOFile.load(gpa, module.name, builtin.cpu.arch) catch |err| switch (err) { - error.InvalidMachO, error.InvalidDwarf => error.InvalidDebugInfo, - error.MissingDebugInfo, error.OutOfMemory, error.UnsupportedDebugInfo, error.ReadFailed => |e| e, - }; + if (module.file == null) { + const path = std.mem.span( + std.c.dyld_image_path_containing_address(@ptrFromInt(module.text_base)).?, + ); + module.file = MachOFile.load(gpa, path, builtin.cpu.arch) catch |err| switch (err) { + error.InvalidMachO, error.InvalidDwarf => error.InvalidDebugInfo, + error.MissingDebugInfo, error.OutOfMemory, error.UnsupportedDebugInfo, error.ReadFailed => |e| e, + }; + } return if (module.file.?) 
|*f| f else |err| err; } }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index 03fc7e2811..ddb6bf73f6 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -154,10 +154,10 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error _ = gpa; const current_regs = context.cur.getRegs(); - var image_base: windows.DWORD64 = undefined; + var image_base: usize = undefined; if (windows.ntdll.RtlLookupFunctionEntry(current_regs.ip, &image_base, &context.history_table)) |runtime_function| { var handler_data: ?*anyopaque = null; - var establisher_frame: u64 = undefined; + var establisher_frame: usize = undefined; _ = windows.ntdll.RtlVirtualUnwind( windows.UNW_FLAG_NHANDLER, image_base, @@ -351,13 +351,19 @@ const Module = struct { var section_handle: windows.HANDLE = undefined; const create_section_rc = windows.ntdll.NtCreateSection( §ion_handle, - windows.STANDARD_RIGHTS_REQUIRED | windows.SECTION_QUERY | windows.SECTION_MAP_READ, + .{ + .SPECIFIC = .{ .SECTION = .{ + .QUERY = true, + .MAP_READ = true, + } }, + .STANDARD = .{ .RIGHTS = .REQUIRED }, + }, null, null, - windows.PAGE_READONLY, + .{ .READONLY = true }, // The documentation states that if no AllocationAttribute is specified, then SEC_COMMIT is the default. // In practice, this isn't the case and specifying 0 will result in INVALID_PARAMETER_6. - windows.SEC_COMMIT, + .{ .COMMIT = true }, coff_file.handle, ); if (create_section_rc != .SUCCESS) return error.MissingDebugInfo; @@ -372,9 +378,9 @@ const Module = struct { 0, null, &coff_len, - .ViewUnmap, - 0, - windows.PAGE_READONLY, + .Unmap, + .{}, + .{ .READONLY = true }, ); if (map_section_rc != .SUCCESS) return error.MissingDebugInfo; errdefer assert(windows.ntdll.NtUnmapViewOfSection(process_handle, @constCast(section_view_ptr.?)) == .SUCCESS); diff --git a/lib/std/enums.zig b/lib/std/enums.zig index ffc678d772..0815a50f38 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -61,7 +61,9 @@ pub fn values(comptime E: type) []const E { /// panic when `e` has no tagged value. /// Returns the tag name for `e` or null if no tag exists. 
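This is mostly relevant for non-exhaustive enums, where `@tagName` would hit the panic mentioned above for a value with no named tag; a minimal sketch:

const E = enum(u8) { a = 1, b = 2, _ };
std.debug.assert(std.mem.eql(u8, "a", std.enums.tagName(E, .a).?));
std.debug.assert(std.enums.tagName(E, @enumFromInt(42)) == null); // no named tag, so null rather than a panic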
pub fn tagName(comptime E: type, e: E) ?[:0]const u8 { - return inline for (@typeInfo(E).@"enum".fields) |f| { + const fields = @typeInfo(E).@"enum".fields; + @setEvalBranchQuota(fields.len); + return inline for (fields) |f| { if (@intFromEnum(e) == f.value) break f.name; } else null; } @@ -202,48 +204,6 @@ test "directEnumArrayDefault slice" { try testing.expectEqualSlices(u8, "default", array[2]); } -/// Deprecated: Use @field(E, @tagName(tag)) or @field(E, string) -pub fn nameCast(comptime E: type, comptime value: anytype) E { - return comptime blk: { - const V = @TypeOf(value); - if (V == E) break :blk value; - const name: ?[]const u8 = switch (@typeInfo(V)) { - .enum_literal, .@"enum" => @tagName(value), - .pointer => value, - else => null, - }; - if (name) |n| { - if (@hasField(E, n)) { - break :blk @field(E, n); - } - @compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n); - } - @compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E)); - }; -} - -test nameCast { - const A = enum(u1) { a = 0, b = 1 }; - const B = enum(u1) { a = 1, b = 0 }; - try testing.expectEqual(A.a, nameCast(A, .a)); - try testing.expectEqual(A.a, nameCast(A, A.a)); - try testing.expectEqual(A.a, nameCast(A, B.a)); - try testing.expectEqual(A.a, nameCast(A, "a")); - try testing.expectEqual(A.a, nameCast(A, @as(*const [1]u8, "a"))); - try testing.expectEqual(A.a, nameCast(A, @as([:0]const u8, "a"))); - try testing.expectEqual(A.a, nameCast(A, @as([]const u8, "a"))); - - try testing.expectEqual(B.a, nameCast(B, .a)); - try testing.expectEqual(B.a, nameCast(B, A.a)); - try testing.expectEqual(B.a, nameCast(B, B.a)); - try testing.expectEqual(B.a, nameCast(B, "a")); - - try testing.expectEqual(B.b, nameCast(B, .b)); - try testing.expectEqual(B.b, nameCast(B, A.b)); - try testing.expectEqual(B.b, nameCast(B, B.b)); - try testing.expectEqual(B.b, nameCast(B, "b")); -} - test fromInt { const E1 = enum { A, diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 53824b44ed..edc3a5f985 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -490,8 +490,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 { const resolved_path = std.fmt.bufPrintSentinel(&resolved_path_buf, "{s}/{s}", .{ a_path, std.os.argv[0], - 0, - }) catch continue; + }, 0) catch continue; var real_path_buf: [max_path_bytes]u8 = undefined; if (posix.realpathZ(resolved_path, &real_path_buf)) |real_path| { diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index 23bd903fe8..ea9c6408bf 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -453,10 +453,10 @@ pub const Iterator = switch (native_os) { &io, &self.buf, self.buf.len, - .FileBothDirectoryInformation, + .BothDirectory, w.FALSE, null, - if (self.first_iter) @as(w.BOOLEAN, w.TRUE) else @as(w.BOOLEAN, w.FALSE), + @intFromBool(self.first_iter), ); self.first_iter = false; if (io.Information == 0) return null; @@ -487,8 +487,8 @@ pub const Iterator = switch (native_os) { const name_wtf8 = self.name_data[0..name_wtf8_len]; const kind: Entry.Kind = blk: { const attrs = dir_info.FileAttributes; - if (attrs & w.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk .directory; - if (attrs & w.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk .sym_link; + if (attrs.DIRECTORY) break :blk .directory; + if (attrs.REPARSE_POINT) break :blk .sym_link; break :blk .file; }; return Entry{ @@ -1013,15 +1013,14 @@ pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) RealPathErr pub fn realpathW2(self: Dir, pathname: []const u16, out_buffer: []u16) 
RealPathError![]u16 { const w = windows; - const access_mask = w.GENERIC_READ | w.SYNCHRONIZE; - const share_access = w.FILE_SHARE_READ | w.FILE_SHARE_WRITE | w.FILE_SHARE_DELETE; - const creation = w.FILE_OPEN; const h_file = blk: { const res = w.OpenFile(pathname, .{ .dir = self.fd, - .access_mask = access_mask, - .share_access = share_access, - .creation = creation, + .access_mask = .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .READ = true }, + }, + .creation = .OPEN, .filter = .any, }) catch |err| switch (err) { error.WouldBlock => unreachable, diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig index 2601c7643a..5e54ba5f7a 100644 --- a/lib/std/fs/File.zig +++ b/lib/std/fs/File.zig @@ -146,13 +146,13 @@ pub fn isCygwinPty(file: File) bool { // for handles that aren't named pipes. { var io_status: windows.IO_STATUS_BLOCK = undefined; - var device_info: windows.FILE_FS_DEVICE_INFORMATION = undefined; - const rc = windows.ntdll.NtQueryVolumeInformationFile(handle, &io_status, &device_info, @sizeOf(windows.FILE_FS_DEVICE_INFORMATION), .FileFsDeviceInformation); + var device_info: windows.FILE.FS_DEVICE_INFORMATION = undefined; + const rc = windows.ntdll.NtQueryVolumeInformationFile(handle, &io_status, &device_info, @sizeOf(windows.FILE.FS_DEVICE_INFORMATION), .Device); switch (rc) { .SUCCESS => {}, else => return false, } - if (device_info.DeviceType != windows.FILE_DEVICE_NAMED_PIPE) return false; + if (device_info.DeviceType.FileDevice != .NAMED_PIPE) return false; } const name_bytes_offset = @offsetOf(windows.FILE_NAME_INFO, "FileName"); @@ -166,7 +166,7 @@ pub fn isCygwinPty(file: File) bool { var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes); var io_status_block: windows.IO_STATUS_BLOCK = undefined; - const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(name_info_bytes.len), .FileNameInformation); + const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(name_info_bytes.len), .Name); switch (rc) { .SUCCESS => {}, .INVALID_PARAMETER => unreachable, @@ -485,7 +485,7 @@ pub fn setPermissions(self: File, permissions: Permissions) SetPermissionsError! &io_status_block, &info, @sizeOf(windows.FILE_BASIC_INFORMATION), - .FileBasicInformation, + .Basic, ); switch (rc) { .SUCCESS => return, @@ -1324,7 +1324,7 @@ pub fn unlock(file: File) void { &io_status_block, &range_off, &range_len, - null, + 0, ) catch |err| switch (err) { error.RangeNotLocked => unreachable, // Function assumes unlocked. error.Unexpected => unreachable, // Resource deallocation must succeed. @@ -1415,7 +1415,7 @@ pub fn downgradeLock(file: File) LockError!void { &io_status_block, &range_off, &range_len, - null, + 0, ) catch |err| switch (err) { error.RangeNotLocked => unreachable, // File was not locked. error.Unexpected => unreachable, // Resource deallocation must succeed. 
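A recurring change in these Windows-facing hunks is replacing OR-ed integer constants (`GENERIC_READ | SYNCHRONIZE`, `FILE_SHARE_*`, `PAGE_*`) with typed flag structs. The following sketches only the general pattern; `AccessMask` here is hypothetical, not the actual `std.os.windows` definition:

const AccessMask = packed struct(u32) {
    READ: bool = false,
    WRITE: bool = false,
    _reserved: u30 = 0,
};
// Field initializers replace bitwise OR of constants; the in-memory layout is still a u32.
const mask: AccessMask = .{ .READ = true };
std.debug.assert(@as(u32, @bitCast(mask)) == 0x1);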
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 05e5de5f22..4a2c0117b1 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -218,6 +218,32 @@ test "Dir.readLink" { }.impl); } +test "Dir.readLink on non-symlinks" { + try testWithAllSupportedPathTypes(struct { + fn impl(ctx: *TestContext) !void { + const file_path = try ctx.transformPath("file.txt"); + try ctx.dir.writeFile(.{ .sub_path = file_path, .data = "nonsense" }); + const dir_path = try ctx.transformPath("subdir"); + try ctx.dir.makeDir(dir_path); + + // file + var buffer: [fs.max_path_bytes]u8 = undefined; + try std.testing.expectError(error.NotLink, ctx.dir.readLink(file_path, &buffer)); + if (builtin.os.tag == .windows) { + var file_path_w = try std.os.windows.sliceToPrefixedFileW(ctx.dir.fd, file_path); + try std.testing.expectError(error.NotLink, ctx.dir.readLinkW(file_path_w.span(), &file_path_w.data)); + } + + // dir + try std.testing.expectError(error.NotLink, ctx.dir.readLink(dir_path, &buffer)); + if (builtin.os.tag == .windows) { + var dir_path_w = try std.os.windows.sliceToPrefixedFileW(ctx.dir.fd, dir_path); + try std.testing.expectError(error.NotLink, ctx.dir.readLinkW(dir_path_w.span(), &dir_path_w.data)); + } + } + }.impl); +} + fn testReadLink(dir: Dir, target_path: []const u8, symlink_path: []const u8) !void { var buffer: [fs.max_path_bytes]u8 = undefined; const actual = try dir.readLink(symlink_path, buffer[0..]); @@ -256,13 +282,11 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { try setupSymlink(ctx.dir, dir_target_path, "symlink", .{ .is_directory = true }); - var symlink = switch (builtin.target.os.tag) { + var symlink: Dir = switch (builtin.target.os.tag) { .windows => windows_symlink: { const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.fd, "symlink"); - var result = Dir{ - .fd = undefined, - }; + var handle: windows.HANDLE = undefined; const path_len_bytes = @as(u16, @intCast(sub_path_w.span().len * 2)); var nt_name = windows.UNICODE_STRING{ @@ -270,32 +294,46 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { .MaximumLength = path_len_bytes, .Buffer = @constCast(&sub_path_w.data), }; - var attr = windows.OBJECT_ATTRIBUTES{ + var attr: windows.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.fd, - .Attributes = 0, + .Attributes = .{}, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var io: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtCreateFile( - &result.fd, - windows.STANDARD_RIGHTS_READ | windows.FILE_READ_ATTRIBUTES | windows.FILE_READ_EA | windows.SYNCHRONIZE | windows.FILE_TRAVERSE, + &handle, + .{ + .SPECIFIC = .{ .FILE_DIRECTORY = .{ + .READ_EA = true, + .TRAVERSE = true, + .READ_ATTRIBUTES = true, + } }, + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + }, &attr, &io, null, - windows.FILE_ATTRIBUTE_NORMAL, - windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE, - windows.FILE_OPEN, - // FILE_OPEN_REPARSE_POINT is the important thing here - windows.FILE_OPEN_REPARSE_POINT | windows.FILE_DIRECTORY_FILE | windows.FILE_SYNCHRONOUS_IO_NONALERT | windows.FILE_OPEN_FOR_BACKUP_INTENT, + .{ .NORMAL = true }, + .VALID_FLAGS, + .OPEN, + .{ + .DIRECTORY_FILE = true, + .IO = .SYNCHRONOUS_NONALERT, + .OPEN_FOR_BACKUP_INTENT = true, + .OPEN_REPARSE_POINT = true, // the important thing here + }, null, 0, ); switch (rc) { - .SUCCESS => 
break :windows_symlink result, + .SUCCESS => break :windows_symlink .{ .fd = handle }, else => return windows.unexpectedStatus(rc), } }, @@ -340,6 +378,7 @@ test "openDir" { test "accessAbsolute" { if (native_os == .wasi) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); @@ -352,6 +391,7 @@ test "accessAbsolute" { test "openDirAbsolute" { if (native_os == .wasi) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); @@ -441,6 +481,7 @@ test "openDir non-cwd parent '..'" { test "readLinkAbsolute" { if (native_os == .wasi) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); @@ -1047,6 +1088,7 @@ test "rename" { test "renameAbsolute" { if (native_os == .wasi) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); @@ -1950,6 +1992,7 @@ test "'.' and '..' in fs.Dir functions" { test "'.' and '..' in absolute functions" { if (native_os == .wasi) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 445b5da455..f584e3f72e 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -141,7 +141,16 @@ test defaultQueryPageSize { assert(std.math.isPowerOfTwo(defaultQueryPageSize())); } -const CAllocator = struct { +/// A wrapper around the C memory allocation API which supports the full `Allocator` +/// interface, including arbitrary alignment. Simple `malloc` calls are used when +/// possible, but large requested alignments may require larger buffers in order to +/// satisfy the request. As well as `malloc`, `realloc`, and `free`, the extension +/// functions `malloc_usable_size` and `posix_memalign` are used when available. 
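A minimal usage sketch of the allocator described above, with error handling reduced to `try`:

const gpa = std.heap.c_allocator;
const bytes = try gpa.alloc(u8, 100);
defer gpa.free(bytes);
const copy = try gpa.dupe(u8, "hello");
defer gpa.free(copy);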
+pub const c_allocator: Allocator = .{ + .ptr = undefined, + .vtable = &c_allocator_impl.vtable, +}; +const c_allocator_impl = struct { comptime { if (!builtin.link_libc) { @compileError("C allocator is only available when linking against libc"); @@ -155,67 +164,55 @@ const CAllocator = struct { .free = free, }; - pub const supports_malloc_size = @TypeOf(malloc_size) != void; - pub const malloc_size = if (@TypeOf(c.malloc_size) != void) - c.malloc_size - else if (@TypeOf(c.malloc_usable_size) != void) - c.malloc_usable_size - else if (@TypeOf(c._msize) != void) - c._msize - else {}; - - pub const supports_posix_memalign = switch (builtin.os.tag) { - .dragonfly, .netbsd, .freebsd, .illumos, .openbsd, .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .serenity => true, + const have_posix_memalign = switch (builtin.os.tag) { + .dragonfly, + .netbsd, + .freebsd, + .illumos, + .openbsd, + .linux, + .driverkit, + .ios, + .maccatalyst, + .macos, + .tvos, + .visionos, + .watchos, + .serenity, + => true, else => false, }; - fn getHeader(ptr: [*]u8) *[*]u8 { - return @ptrCast(@alignCast(ptr - @sizeOf(usize))); - } - - fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 { - const alignment_bytes = alignment.toByteUnits(); - if (supports_posix_memalign) { - // The posix_memalign only accepts alignment values that are a - // multiple of the pointer size - const effective_alignment = @max(alignment_bytes, @sizeOf(usize)); - - var aligned_ptr: ?*anyopaque = undefined; - if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0) - return null; - - return @ptrCast(aligned_ptr); + fn allocStrat(need_align: Alignment) union(enum) { + raw, + posix_memalign: if (have_posix_memalign) void else noreturn, + manual_align: if (have_posix_memalign) noreturn else void, + } { + // If `malloc` guarantees `need_align`, always prefer a raw allocation. + if (Alignment.compare(need_align, .lte, .of(c.max_align_t))) { + return .raw; } - - // Thin wrapper around regular malloc, overallocate to account for - // alignment padding and store the original malloc()'ed pointer before - // the aligned address. - const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null)); - const unaligned_addr = @intFromPtr(unaligned_ptr); - const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes); - const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); - getHeader(aligned_ptr).* = unaligned_ptr; - - return aligned_ptr; + // Use `posix_memalign` if available. Otherwise, we must manually align the allocation. + return if (have_posix_memalign) .posix_memalign else .manual_align; } - fn alignedFree(ptr: [*]u8) void { - if (supports_posix_memalign) { - return c.free(ptr); - } - - const unaligned_ptr = getHeader(ptr).*; - c.free(unaligned_ptr); - } - - fn alignedAllocSize(ptr: [*]u8) usize { - if (supports_posix_memalign) { - return CAllocator.malloc_size(ptr); - } - - const unaligned_ptr = getHeader(ptr).*; - const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr); - return CAllocator.malloc_size(unaligned_ptr) - delta; + /// If `allocStrat(a) == .manual_align`, an allocation looks like this: + /// + /// unaligned_ptr hdr_ptr aligned_ptr + /// v v v + /// +---------------+--------+--------------+ + /// | padding | header | usable bytes | + /// +---------------+--------+--------------+ + /// + /// * `unaligned_ptr` is the raw return value of `malloc`. 
+ /// * `aligned_ptr` is computed by aligning `unaligned_ptr` forward; it is what `alloc` returns. + /// * `hdr_ptr` points to a pointer-sized header directly before the usable space. This header + /// contains the value `unaligned_ptr`, so that we can pass it to `free` later. This is + /// necessary because the width of the padding is unknown. + /// + /// This function accepts `aligned_ptr` and offsets it backwards to return `hdr_ptr`. + fn manualAlignHeader(aligned_ptr: [*]u8) *[*]u8 { + return @ptrCast(@alignCast(aligned_ptr - @sizeOf(usize))); } fn alloc( @@ -226,136 +223,125 @@ const CAllocator = struct { ) ?[*]u8 { _ = return_address; assert(len > 0); - return alignedAlloc(len, alignment); + switch (allocStrat(alignment)) { + .raw => { + // `std.c.max_align_t` isn't the whole story, because if `len` is smaller than + // every C type with alignment `max_align_t`, the allocation can be less-aligned. + // The implementation need only guarantee that any type of length `len` would be + // suitably aligned. + // + // For instance, if `len == 8` and `alignment == .@"16"`, then `malloc` may not + // fulfil this request, because there is necessarily no C type with 8-byte size + // but 16-byte alignment. + // + // In theory, the resulting rule here would be target-specific, but in practice, + // the smallest type with an alignment of `max_align_t` has the same size (it's + // usually `c_longdouble`), so we can just extend the allocation size up to the + // alignment of `max_align_t` if necessary. + const actual_len = @max(len, @alignOf(std.c.max_align_t)); + const ptr = c.malloc(actual_len) orelse return null; + assert(alignment.check(@intFromPtr(ptr))); + return @ptrCast(ptr); + }, + .posix_memalign => { + // The posix_memalign only accepts alignment values that are a + // multiple of the pointer size + const effective_alignment = @max(alignment.toByteUnits(), @sizeOf(usize)); + var aligned_ptr: ?*anyopaque = undefined; + if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0) { + return null; + } + assert(alignment.check(@intFromPtr(aligned_ptr))); + return @ptrCast(aligned_ptr); + }, + .manual_align => { + // Overallocate to account for alignment padding and store the original pointer + // returned by `malloc` before the aligned address. 
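// Worked example with illustrative numbers: for len = 100, a 64-byte alignment,
// and @sizeOf(usize) == 8, padded_len is 100 + 8 + 63 = 171. If `malloc` returns
// 0x1004, then aligned_addr = forward(0x1004 + 8, 64) = 0x1040; the header at
// 0x1038 stores 0x1004 so that `free` can later recover the original pointer.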
+ const padded_len = len + @sizeOf(usize) + alignment.toByteUnits() - 1; + const unaligned_ptr: [*]u8 = @ptrCast(c.malloc(padded_len) orelse return null); + const unaligned_addr = @intFromPtr(unaligned_ptr); + const aligned_addr = alignment.forward(unaligned_addr + @sizeOf(usize)); + const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); + manualAlignHeader(aligned_ptr).* = unaligned_ptr; + return aligned_ptr; + }, + } } fn resize( _: *anyopaque, - buf: []u8, + memory: []u8, alignment: Alignment, new_len: usize, return_address: usize, ) bool { - _ = alignment; _ = return_address; - if (new_len <= buf.len) { - return true; + assert(new_len > 0); + if (new_len <= memory.len) { + return true; // in-place shrink always works } - if (CAllocator.supports_malloc_size) { - const full_len = alignedAllocSize(buf.ptr); - if (new_len <= full_len) { - return true; - } - } - return false; + const mallocSize = func: { + if (@TypeOf(c.malloc_size) != void) break :func c.malloc_size; + if (@TypeOf(c.malloc_usable_size) != void) break :func c.malloc_usable_size; + if (@TypeOf(c._msize) != void) break :func c._msize; + return false; // we don't know how much space is actually available + }; + const usable_len: usize = switch (allocStrat(alignment)) { + .raw, .posix_memalign => mallocSize(memory.ptr), + .manual_align => usable_len: { + const unaligned_ptr = manualAlignHeader(memory.ptr).*; + const full_len = mallocSize(unaligned_ptr); + const padding = @intFromPtr(memory.ptr) - @intFromPtr(unaligned_ptr); + break :usable_len full_len - padding; + }, + }; + return new_len <= usable_len; } fn remap( - context: *anyopaque, + ctx: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, return_address: usize, ) ?[*]u8 { - // realloc would potentially return a new allocation that does not - // respect the original alignment. - return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null; + assert(new_len > 0); + // Prefer resizing in-place if possible, since `realloc` could be expensive even if legal. + if (resize(ctx, memory, alignment, new_len, return_address)) { + return memory.ptr; + } + switch (allocStrat(alignment)) { + .raw => { + // `malloc` and friends guarantee the required alignment, so we can try `realloc`. + // C only needs to respect `max_align_t` up to the allocation size due to object + // alignment rules. If necessary, extend the allocation size. + const actual_len = @max(new_len, @alignOf(std.c.max_align_t)); + const new_ptr = c.realloc(memory.ptr, actual_len) orelse return null; + assert(alignment.check(@intFromPtr(new_ptr))); + return @ptrCast(new_ptr); + }, + .posix_memalign, .manual_align => { + // `realloc` would potentially return a new allocation which does not respect + // the original alignment, so we can't do anything more. + return null; + }, + } } fn free( _: *anyopaque, - buf: []u8, + memory: []u8, alignment: Alignment, return_address: usize, ) void { - _ = alignment; _ = return_address; - alignedFree(buf.ptr); + switch (allocStrat(alignment)) { + .raw, .posix_memalign => c.free(memory.ptr), + .manual_align => c.free(manualAlignHeader(memory.ptr).*), + } } }; -/// Supports the full Allocator interface, including alignment, and exploiting -/// `malloc_usable_size` if available. For an allocator that directly calls -/// `malloc`/`free`, see `raw_c_allocator`. 
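Referring back to the `remap` comment above: callers normally reach it through `Allocator.realloc`, which tries the in-place path first. A minimal sketch, assuming `gpa` is any `std.mem.Allocator` such as `std.heap.c_allocator`:

var buf = try gpa.alloc(u8, 32);
defer gpa.free(buf);
// Growing first attempts an in-place resize/remap; only if that fails does
// `realloc` allocate new memory, copy, and free the old block.
buf = try gpa.realloc(buf, 4096);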
-pub const c_allocator: Allocator = .{ - .ptr = undefined, - .vtable = &CAllocator.vtable, -}; - -/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly -/// calls `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`. -/// This allocator is safe to use as the backing allocator with -/// `ArenaAllocator` for example and is more optimal in such a case than -/// `c_allocator`. -pub const raw_c_allocator: Allocator = .{ - .ptr = undefined, - .vtable = &raw_c_allocator_vtable, -}; -const raw_c_allocator_vtable: Allocator.VTable = .{ - .alloc = rawCAlloc, - .resize = rawCResize, - .remap = rawCRemap, - .free = rawCFree, -}; - -fn rawCAlloc( - context: *anyopaque, - len: usize, - alignment: Alignment, - return_address: usize, -) ?[*]u8 { - _ = context; - _ = return_address; - assert(alignment.compare(.lte, .of(std.c.max_align_t))); - // Note that this pointer cannot be aligncasted to max_align_t because if - // len is < max_align_t then the alignment can be smaller. For example, if - // max_align_t is 16, but the user requests 8 bytes, there is no built-in - // type in C that is size 8 and has 16 byte alignment, so the alignment may - // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc - // is allowed to return a 1-byte aligned pointer. - return @ptrCast(c.malloc(len)); -} - -fn rawCResize( - context: *anyopaque, - memory: []u8, - alignment: Alignment, - new_len: usize, - return_address: usize, -) bool { - _ = context; - _ = memory; - _ = alignment; - _ = new_len; - _ = return_address; - return false; -} - -fn rawCRemap( - context: *anyopaque, - memory: []u8, - alignment: Alignment, - new_len: usize, - return_address: usize, -) ?[*]u8 { - _ = context; - _ = alignment; - _ = return_address; - return @ptrCast(c.realloc(memory.ptr, new_len)); -} - -fn rawCFree( - context: *anyopaque, - memory: []u8, - alignment: Alignment, - return_address: usize, -) void { - _ = context; - _ = alignment; - _ = return_address; - c.free(memory.ptr); -} - /// On operating systems that support memory mapping, this allocator makes a /// syscall directly for every allocation and free. 
/// @@ -508,12 +494,6 @@ test c_allocator { } } -test raw_c_allocator { - if (builtin.link_libc) { - try testAllocator(raw_c_allocator); - } -} - test smp_allocator { if (builtin.single_threaded) return; try testAllocator(smp_allocator); diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index f3e3857b58..9bb5fba9f5 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -30,7 +30,8 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { var base_addr: ?*anyopaque = null; var size: windows.SIZE_T = n; - var status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | windows.MEM_RESERVE, windows.PAGE_READWRITE); + const current_process = windows.GetCurrentProcess(); + var status = ntdll.NtAllocateVirtualMemory(current_process, @ptrCast(&base_addr), 0, &size, .{ .COMMIT = true, .RESERVE = true }, .{ .READWRITE = true }); if (status == SUCCESS and mem.isAligned(@intFromPtr(base_addr), alignment_bytes)) { return @ptrCast(base_addr); @@ -38,7 +39,7 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { if (status == SUCCESS) { var region_size: windows.SIZE_T = 0; - _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE); + _ = ntdll.NtFreeVirtualMemory(current_process, @ptrCast(&base_addr), &region_size, .{ .RELEASE = true }); } const overalloc_len = n + alignment_bytes - page_size; @@ -47,7 +48,7 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { base_addr = null; size = overalloc_len; - status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, windows.PAGE_NOACCESS); + status = ntdll.NtAllocateVirtualMemory(current_process, @ptrCast(&base_addr), 0, &size, .{ .RESERVE = true, .RESERVE_PLACEHOLDER = true }, .{ .NOACCESS = true }); if (status != SUCCESS) return null; @@ -58,7 +59,7 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { if (prefix_size > 0) { var prefix_base = base_addr; var prefix_size_param: windows.SIZE_T = prefix_size; - _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&prefix_base), &prefix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); + _ = ntdll.NtFreeVirtualMemory(current_process, @ptrCast(&prefix_base), &prefix_size_param, .{ .RELEASE = true, .PRESERVE_PLACEHOLDER = true }); } const suffix_start = aligned_addr + aligned_len; @@ -66,13 +67,13 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { if (suffix_size > 0) { var suffix_base = @as(?*anyopaque, @ptrFromInt(suffix_start)); var suffix_size_param: windows.SIZE_T = suffix_size; - _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&suffix_base), &suffix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); + _ = ntdll.NtFreeVirtualMemory(current_process, @ptrCast(&suffix_base), &suffix_size_param, .{ .RELEASE = true, .PRESERVE_PLACEHOLDER = true }); } base_addr = @ptrFromInt(aligned_addr); size = aligned_len; - status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | MEM_PRESERVE_PLACEHOLDER, windows.PAGE_READWRITE); + status = ntdll.NtAllocateVirtualMemory(current_process, @ptrCast(&base_addr), 0, &size, .{ .COMMIT = true }, .{ .READWRITE = true }); if (status == SUCCESS) { return @ptrCast(base_addr); @@ -80,7 +81,7 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 { base_addr = @as(?*anyopaque, 
@ptrFromInt(aligned_addr)); size = aligned_len; - _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &size, windows.MEM_RELEASE); + _ = ntdll.NtFreeVirtualMemory(current_process, @ptrCast(&base_addr), &size, .{ .RELEASE = true }); return null; } @@ -145,7 +146,7 @@ pub fn unmap(memory: []align(page_size_min) u8) void { if (native_os == .windows) { var base_addr: ?*anyopaque = memory.ptr; var region_size: windows.SIZE_T = 0; - _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE); + _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, .{ .RELEASE = true }); } else { const page_aligned_len = mem.alignForward(usize, memory.len, std.heap.pageSize()); posix.munmap(memory.ptr[0..page_aligned_len]); @@ -166,7 +167,7 @@ pub fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 { var decommit_addr: ?*anyopaque = @ptrFromInt(new_addr_end); var decommit_size: windows.SIZE_T = old_addr_end - new_addr_end; - _ = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&decommit_addr), 0, &decommit_size, windows.MEM_RESET, windows.PAGE_NOACCESS); + _ = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&decommit_addr), 0, &decommit_size, .{ .RESET = true }, .{ .NOACCESS = true }); } return memory.ptr; } diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig index 4480009781..3183becd82 100644 --- a/lib/std/heap/debug_allocator.zig +++ b/lib/std/heap/debug_allocator.zig @@ -460,7 +460,7 @@ pub fn DebugAllocator(comptime config: Config) type { pub fn detectLeaks(self: *Self) usize { var leaks: usize = 0; - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); for (self.buckets, 0..) |init_optional_bucket, size_class_index| { var optional_bucket = init_optional_bucket; @@ -536,7 +536,7 @@ pub fn DebugAllocator(comptime config: Config) type { fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void { var addr_buf: [stack_n]usize = undefined; const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf); - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{ std.debug.FormatStackTrace{ .stack_trace = alloc_stack_trace, @@ -590,7 +590,7 @@ pub fn DebugAllocator(comptime config: Config) type { if (config.safety and old_mem.len != entry.value_ptr.bytes.len) { var addr_buf: [stack_n]usize = undefined; const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf); - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{ entry.value_ptr.bytes.len, old_mem.len, @@ -703,7 +703,7 @@ pub fn DebugAllocator(comptime config: Config) type { if (config.safety and old_mem.len != entry.value_ptr.bytes.len) { var addr_buf: [stack_n]usize = undefined; const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf); - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation size {d} bytes does not match free size {d}. 
Allocation: {f} Free: {f}", .{ entry.value_ptr.bytes.len, old_mem.len, @@ -935,7 +935,7 @@ pub fn DebugAllocator(comptime config: Config) type { var addr_buf: [stack_n]usize = undefined; const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf); if (old_memory.len != requested_size) { - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{ requested_size, old_memory.len, @@ -950,7 +950,7 @@ pub fn DebugAllocator(comptime config: Config) type { }); } if (alignment != slot_alignment) { - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{ slot_alignment.toByteUnits(), alignment.toByteUnits(), @@ -1044,7 +1044,7 @@ pub fn DebugAllocator(comptime config: Config) type { var addr_buf: [stack_n]usize = undefined; const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf); if (memory.len != requested_size) { - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{ requested_size, memory.len, @@ -1059,7 +1059,7 @@ pub fn DebugAllocator(comptime config: Config) type { }); } if (alignment != slot_alignment) { - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{ slot_alignment.toByteUnits(), alignment.toByteUnits(), diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 5be73e1a52..10ab23f476 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -529,7 +529,7 @@ pub const Response = struct { }; if (first_line[8] != ' ') return error.HttpHeadersInvalid; const status: http.Status = @enumFromInt(parseInt3(first_line[9..12])); - const reason = mem.trimLeft(u8, first_line[12..], " "); + const reason = mem.trimStart(u8, first_line[12..], " "); res.version = version; res.status = status; diff --git a/lib/std/http/test.zig b/lib/std/http/test.zig index 2893039102..d7b53f33ff 100644 --- a/lib/std/http/test.zig +++ b/lib/std/http/test.zig @@ -12,6 +12,8 @@ const expectEqualStrings = std.testing.expectEqualStrings; const expectError = std.testing.expectError; test "trailers" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; const test_server = try createTestServer(io, struct { fn run(test_server: *TestServer) anyerror!void { @@ -96,6 +98,8 @@ test "trailers" { } test "HTTP server handles a chunked transfer coding request" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; const test_server = try createTestServer(io, struct { fn run(test_server: *TestServer) anyerror!void { @@ -162,6 +166,8 @@ test "HTTP server handles a chunked transfer coding request" { } test "echo content server" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // 
https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; const test_server = try createTestServer(io, struct { fn run(test_server: *TestServer) anyerror!void { @@ -250,6 +256,8 @@ test "echo content server" { } test "Server.Request.respondStreaming non-chunked, unknown content-length" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; if (builtin.os.tag == .windows) { @@ -326,6 +334,8 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" { } test "receiving arbitrary http headers from the client" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; const test_server = try createTestServer(io, struct { @@ -389,6 +399,8 @@ test "receiving arbitrary http headers from the client" { } test "general client/server API coverage" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; if (builtin.os.tag == .windows) { @@ -882,6 +894,8 @@ test "general client/server API coverage" { } test "Server streams both reading and writing" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + const io = std.testing.io; const test_server = try createTestServer(io, struct { @@ -1136,6 +1150,9 @@ fn createTestServer(io: Io, S: type) !*TestServer { } test "redirect to different connection" { + if (builtin.cpu.arch.isPowerPC64() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171879 + if (builtin.cpu.arch.isMIPS32() and !builtin.link_libc) return error.SkipZigTest; // https://codeberg.org/ziglang/zig/issues/30216 + const io = std.testing.io; const test_server_new = try createTestServer(io, struct { fn run(test_server: *TestServer) anyerror!void { diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index ac7ee2775e..423b6d1c28 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -17,6 +17,27 @@ const Endian = std.builtin.Endian; const Signedness = std.builtin.Signedness; const native_endian = builtin.cpu.arch.endian(); +// Comptime-computed constants for supported bases (2 - 36) +// all values are set to 0 for bases 0 - 1, to make it possible to +// access a constant for a given base b using `constants.value[b]` +const Constants = struct { + // big_bases[b] is the biggest power of b that fit in a single Limb + // i.e. big_bases[b] = b^k < 2^@bitSizeOf(Limb) and b^(k+1) >= 2^@bitSizeOf(Limb) + big_bases: [37]Limb, + // digits_per_limb[b] is the value of k used in the previous field + digits_per_limb: [37]u8, +}; +const constants: Constants = blk: { + @setEvalBranchQuota(2000); + var digits_per_limb = [_]u8{0} ** 37; + var bases = [_]Limb{0} ** 37; + for (2..37) |base| { + digits_per_limb[base] = @intCast(math.log(Limb, base, math.maxInt(Limb))); + bases[base] = std.math.pow(Limb, base, digits_per_limb[base]); + } + break :blk Constants{ .big_bases = bases, .digits_per_limb = digits_per_limb }; +}; + /// Returns the number of limbs needed to store `scalar`, which must be a /// primitive integer or float value. 
/// Note: A comptime-known upper bound of this value that may be used @@ -329,23 +350,15 @@ pub const Mutable = struct { /// not allowed (e.g. 0x43 should simply be 43). Underscores in the input string are /// ignored and can be used as digit separators. /// - /// Asserts there is enough memory for the value in `self.limbs`. An upper bound on number of limbs can + /// There must be enough memory for the value in `self.limbs`. An upper bound on number of limbs can /// be determined with `calcSetStringLimbCount`. /// Asserts the base is in the range [2, 36]. /// /// Returns an error if the value has invalid digits for the requested base. - /// - /// `limbs_buffer` is used for temporary storage. The size required can be found with - /// `calcSetStringLimbsBufferLen`. - /// - /// If `allocator` is provided, it will be used for temporary storage to improve - /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. pub fn setString( self: *Mutable, base: u8, value: []const u8, - limbs_buffer: []Limb, - allocator: ?Allocator, ) error{InvalidCharacter}!void { assert(base >= 2); assert(base <= 36); @@ -357,18 +370,41 @@ pub const Mutable = struct { i += 1; } - const ap_base: Const = .{ .limbs = &[_]Limb{base}, .positive = true }; - self.set(0); + @memset(self.limbs, 0); + self.len = 1; + var limb: Limb = 0; + var j: usize = 0; for (value[i..]) |ch| { if (ch == '_') { continue; } const d = try std.fmt.charToDigit(ch, base); - const ap_d: Const = .{ .limbs = &[_]Limb{d}, .positive = true }; - - self.mul(self.toConst(), ap_base, limbs_buffer, allocator); - self.add(self.toConst(), ap_d); + limb *= base; + limb += d; + j += 1; + + if (j == constants.digits_per_limb[base]) { + const len = @min(self.len + 1, self.limbs.len); + // r = a * b = a + a * (b - 1) + // we assert when self.limbs is not large enough to store the number + assert(!llmulLimb(.add, self.limbs[0..len], self.limbs[0..len], constants.big_bases[base] - 1)); + assert(lladdcarry(self.limbs[0..len], self.limbs[0..len], &[1]Limb{limb}) == 0); + + if (self.limbs.len > self.len and self.limbs[self.len] != 0) + self.len += 1; + j = 0; + limb = 0; + } + } + if (j > 0) { + const len = @min(self.len + 1, self.limbs.len); + // we assert when self.limbs is not large enough to store the number + assert(!llmulLimb(.add, self.limbs[0..len], self.limbs[0..len], math.pow(Limb, base, j) - 1)); + assert(lladdcarry(self.limbs[0..len], self.limbs[0..len], &[1]Limb{limb}) == 0); + + if (self.limbs.len > self.len and self.limbs[self.len] != 0) + self.len += 1; } self.positive = positive; } @@ -2081,7 +2117,7 @@ pub const Const = struct { for (self.limbs[0..self.limbs.len]) |limb| { std.debug.print("{x} ", .{limb}); } - std.debug.print("len={} positive={}\n", .{ self.len, self.positive }); + std.debug.print("len={} positive={}\n", .{ self.limbs.len, self.positive }); } pub fn abs(self: Const) Const { @@ -2884,10 +2920,8 @@ pub const Managed = struct { pub fn setString(self: *Managed, base: u8, value: []const u8) !void { if (base < 2 or base > 36) return error.InvalidBase; try self.ensureCapacity(calcSetStringLimbCount(base, value.len)); - const limbs_buffer = try self.allocator.alloc(Limb, calcSetStringLimbsBufferLen(base, value.len)); - defer self.allocator.free(limbs_buffer); var m = self.toMutable(); - try m.setString(base, value, limbs_buffer, self.allocator); + try m.setString(base, value); self.setMetadata(m.positive, m.len); } @@ -3596,6 +3630,7 @@ fn llmulaccKaratsuba( /// r = r (op) a. 
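Note: after the `setString` changes above, `Mutable.setString` takes only a base and the digit string (with `self.limbs` sized via `calcSetStringLimbCount`), and `Managed.setString` no longer allocates a scratch limb buffer. A call-site sketch, not part of the diff; it borrows the digits from the "string to" test below and assumes `Managed.toString` keeps its `(allocator, base, case)` signature.

    const std = @import("std");

    test "Managed.setString round-trips a base 10 string" {
        var a = try std.math.big.int.Managed.init(std.testing.allocator);
        defer a.deinit();

        // Underscores are accepted as digit separators and ignored.
        try a.setString(10, "120_317241209124781241290847124");

        const s = try a.toString(std.testing.allocator, 10, .lower);
        defer std.testing.allocator.free(s);
        try std.testing.expectEqualSlices(u8, "120317241209124781241290847124", s);
    }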
/// The result is computed modulo `r.len`. fn llaccum(comptime op: AccOp, r: []Limb, a: []const Limb) void { + assert(!slicesOverlap(r, a) or @intFromPtr(r.ptr) <= @intFromPtr(a.ptr)); if (op == .sub) { _ = llsubcarry(r, r, a); return; @@ -3665,6 +3700,8 @@ fn llmulaccLong(comptime op: AccOp, r: []Limb, a: []const Limb, b: []const Limb) /// The result is computed modulo `r.len`. /// Returns whether the operation overflowed. fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool { + assert(!slicesOverlap(acc, y) or @intFromPtr(acc.ptr) <= @intFromPtr(y.ptr)); + if (xi == 0) { return false; } @@ -3727,6 +3764,8 @@ fn llsubcarry(r: []Limb, a: []const Limb, b: []const Limb) Limb { assert(a.len != 0 and b.len != 0); assert(a.len >= b.len); assert(r.len >= a.len); + assert(!slicesOverlap(r, a) or @intFromPtr(r.ptr) <= @intFromPtr(a.ptr)); + assert(!slicesOverlap(r, b) or @intFromPtr(r.ptr) <= @intFromPtr(b.ptr)); var i: usize = 0; var borrow: Limb = 0; @@ -3758,6 +3797,8 @@ fn lladdcarry(r: []Limb, a: []const Limb, b: []const Limb) Limb { assert(a.len != 0 and b.len != 0); assert(a.len >= b.len); assert(r.len >= a.len); + assert(!slicesOverlap(r, a) or @intFromPtr(r.ptr) <= @intFromPtr(a.ptr)); + assert(!slicesOverlap(r, b) or @intFromPtr(r.ptr) <= @intFromPtr(b.ptr)); var i: usize = 0; var carry: Limb = 0; diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index eee6a0c7ce..7efd0f00c3 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -737,7 +737,7 @@ test "string to" { defer testing.allocator.free(as); const es = "120317241209124781241290847124"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "string to base base error" { @@ -755,7 +755,7 @@ test "string to base 2" { defer testing.allocator.free(as); const es = "-1011"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "string to base 16" { @@ -766,7 +766,7 @@ test "string to base 16" { defer testing.allocator.free(as); const es = "efffffff00000001eeeeeeefaaaaaaab"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "string to base 36" { @@ -777,7 +777,7 @@ test "string to base 36" { defer testing.allocator.free(as); const es = "fifvthrv1mzt79ez9"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "neg string to" { @@ -788,7 +788,7 @@ test "neg string to" { defer testing.allocator.free(as); const es = "-123907434"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "zero string to" { @@ -799,7 +799,7 @@ test "zero string to" { defer testing.allocator.free(as); const es = "0"; - try testing.expect(mem.eql(u8, as, es)); + try testing.expectEqualSlices(u8, es, as); } test "clone" { @@ -3404,26 +3404,26 @@ test "big int conversion read twos complement with padding" { var bit_count: usize = 12 * 8 + 1; a.toConst().writeTwosComplement(buffer1[0..13], .little); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa }, buffer1); a.toConst().writeTwosComplement(buffer1[0..13], .big); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa })); + try 
testing.expectEqualSlices(u8, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa }, buffer1); a.toConst().writeTwosComplement(buffer1[0..16], .little); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 }, buffer1); a.toConst().writeTwosComplement(buffer1[0..16], .big); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd })); + try testing.expectEqualSlices(u8, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }, buffer1); @memset(buffer1, 0xaa); try a.set(-0x01_02030405_06070809_0a0b0c0d); bit_count = 12 * 8 + 2; a.toConst().writeTwosComplement(buffer1[0..13], .little); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa }, buffer1); a.toConst().writeTwosComplement(buffer1[0..13], .big); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa }, buffer1); a.toConst().writeTwosComplement(buffer1[0..16], .little); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff }, buffer1); a.toConst().writeTwosComplement(buffer1[0..16], .big); - try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 })); + try testing.expectEqualSlices(u8, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }, buffer1); } test "big int write twos complement +/- zero" { @@ -3438,13 +3438,13 @@ test "big int write twos complement +/- zero" { // Test zero m.toConst().writeTwosComplement(buffer1[0..13], .little); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)), buffer1); m.toConst().writeTwosComplement(buffer1[0..13], .big); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)), buffer1); m.toConst().writeTwosComplement(buffer1[0..16], .little); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 16)), buffer1); m.toConst().writeTwosComplement(buffer1[0..16], .big); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 16)), buffer1); @memset(buffer1, 0xaa); m.positive = false; @@ -3452,13 +3452,13 @@ test "big int write twos complement +/- zero" { // Test negative zero m.toConst().writeTwosComplement(buffer1[0..13], .little); - try 
testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)), buffer1); m.toConst().writeTwosComplement(buffer1[0..13], .big); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3)), buffer1); m.toConst().writeTwosComplement(buffer1[0..16], .little); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 16)), buffer1); m.toConst().writeTwosComplement(buffer1[0..16], .big); - try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16)))); + try testing.expectEqualSlices(u8, &(([_]u8{0} ** 16)), buffer1); } test "big int conversion write twos complement with padding" { @@ -3816,7 +3816,7 @@ test "(BigInt) positive" { const b_fmt = try std.fmt.allocPrint(testing.allocator, "{d}", .{b}); defer testing.allocator.free(b_fmt); - try testing.expect(!mem.eql(u8, b_fmt, "(BigInt)")); + try testing.expect(!mem.eql(u8, "(BigInt)", b_fmt)); } test "(BigInt) negative" { @@ -3840,7 +3840,7 @@ test "(BigInt) negative" { const b_fmt = try std.fmt.allocPrint(testing.allocator, "{d}", .{b}); defer testing.allocator.free(b_fmt); - try testing.expect(mem.eql(u8, a_fmt, "(BigInt)")); + try testing.expectEqualSlices(u8, "(BigInt)", a_fmt); try testing.expect(!mem.eql(u8, b_fmt, "(BigInt)")); } diff --git a/lib/std/math/hypot.zig b/lib/std/math/hypot.zig index e90b6505ce..f95c3c0bd4 100644 --- a/lib/std/math/hypot.zig +++ b/lib/std/math/hypot.zig @@ -1,3 +1,4 @@ +const builtin = @import("builtin"); const std = @import("../std.zig"); const math = std.math; const expect = std.testing.expect; @@ -92,10 +93,12 @@ const hypot_test_cases = .{ }; test hypot { + if (builtin.cpu.arch.isPowerPC() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171869 try expect(hypot(0.3, 0.4) == 0.5); } test "hypot.correct" { + if (builtin.cpu.arch.isPowerPC() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171869 inline for (.{ f16, f32, f64, f128 }) |T| { inline for (hypot_test_cases) |v| { const a: T, const b: T, const c: T = v; @@ -105,6 +108,7 @@ test "hypot.correct" { } test "hypot.precise" { + if (builtin.cpu.arch.isPowerPC() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171869 inline for (.{ f16, f32, f64 }) |T| { // f128 seems to be 5 ulp inline for (hypot_test_cases) |v| { const a: T, const b: T, const c: T = v; @@ -114,6 +118,7 @@ test "hypot.precise" { } test "hypot.special" { + if (builtin.cpu.arch.isPowerPC() and builtin.mode != .Debug) return error.SkipZigTest; // https://github.com/llvm/llvm-project/issues/171869 @setEvalBranchQuota(2000); inline for (.{ f16, f32, f64, f128 }) |T| { try expect(math.isNan(hypot(nan(T), 0.0))); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index ed2768b6bd..169922e449 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -1221,9 +1221,6 @@ test trimStart { try testing.expectEqualSlices(u8, "foo\n ", trimStart(u8, " foo\n ", " \n")); } -/// Deprecated: use `trimStart` instead. -pub const trimLeft = trimStart; - /// Remove a set of values from the end of a slice. 
pub fn trimEnd(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { var end: usize = slice.len; @@ -1235,9 +1232,6 @@ test trimEnd { try testing.expectEqualSlices(u8, " foo", trimEnd(u8, " foo\n ", " \n")); } -/// Deprecated: use `trimEnd` instead. -pub const trimRight = trimEnd; - /// Remove a set of values from the beginning and end of a slice. pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { var begin: usize = 0; diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 77e288ecaa..677e359b34 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -614,38 +614,6 @@ test activeTag { try testing.expect(activeTag(u) == UE.Float); } -/// Deprecated: Use @FieldType(U, tag_name) -const TagPayloadType = TagPayload; - -/// Deprecated: Use @FieldType(U, tag_name) -pub fn TagPayloadByName(comptime U: type, comptime tag_name: []const u8) type { - const info = @typeInfo(U).@"union"; - - inline for (info.fields) |field_info| { - if (comptime mem.eql(u8, field_info.name, tag_name)) - return field_info.type; - } - - @compileError("no field '" ++ tag_name ++ "' in union '" ++ @typeName(U) ++ "'"); -} - -/// Deprecated: Use @FieldType(U, @tagName(tag)) -pub fn TagPayload(comptime U: type, comptime tag: Tag(U)) type { - return TagPayloadByName(U, @tagName(tag)); -} - -test TagPayload { - const Event = union(enum) { - Moved: struct { - from: i32, - to: i32, - }, - }; - const MovedEvent = TagPayload(Event, Event.Moved); - const e: Event = .{ .Moved = undefined }; - try testing.expect(MovedEvent == @TypeOf(e.Moved)); -} - /// Compares two of any type for equality. Containers that do not support comparison /// on their own are compared on a field-by-field basis. Pointers are not followed. pub fn eql(a: anytype, b: @TypeOf(a)) bool { @@ -774,14 +742,6 @@ test eql { try testing.expect(!eql(v1, v3)); } -/// Deprecated: use `std.enums.fromInt` instead and handle null. -pub const IntToEnumError = error{InvalidEnumTag}; - -/// Deprecated: use `std.enums.fromInt` instead and handle null instead of an error. -pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTag { - return std.enums.fromInt(EnumTag, tag_int) orelse return error.InvalidEnumTag; -} - /// Given a type and a name, return the field index according to source order. /// Returns `null` if the field is not found. 
pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int { diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index b80d7c606f..dc835fe81e 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -95,15 +95,14 @@ pub fn clone( pub const ARCH = arch_bits.ARCH; pub const HWCAP = arch_bits.HWCAP; pub const SC = arch_bits.SC; -pub const Stat = arch_bits.Stat; pub const VDSO = arch_bits.VDSO; -pub const blkcnt_t = arch_bits.blkcnt_t; -pub const blksize_t = arch_bits.blksize_t; -pub const dev_t = arch_bits.dev_t; -pub const ino_t = arch_bits.ino_t; -pub const mode_t = arch_bits.mode_t; -pub const nlink_t = arch_bits.nlink_t; -pub const off_t = arch_bits.off_t; +pub const blkcnt_t = u64; +pub const blksize_t = u32; +pub const dev_t = u64; +pub const ino_t = u64; +pub const mode_t = u32; +pub const nlink_t = u32; +pub const off_t = i64; pub const time_t = arch_bits.time_t; pub const user_desc = arch_bits.user_desc; @@ -2199,61 +2198,13 @@ pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flag return syscall4(.accept4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags); } -pub fn fstat(fd: i32, stat_buf: *Stat) usize { - if (native_arch == .riscv32 or native_arch.isLoongArch()) { - // riscv32 and loongarch have made the interesting decision to not implement some of - // the older stat syscalls, including this one. - @compileError("No fstat syscall on this architecture."); - } else if (@hasField(SYS, "fstat64")) { - return syscall2(.fstat64, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); - } else { - return syscall2(.fstat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf)); - } -} - -pub fn stat(pathname: [*:0]const u8, statbuf: *Stat) usize { - if (native_arch == .riscv32 or native_arch.isLoongArch()) { - // riscv32 and loongarch have made the interesting decision to not implement some of - // the older stat syscalls, including this one. - @compileError("No stat syscall on this architecture."); - } else if (@hasField(SYS, "stat64")) { - return syscall2(.stat64, @intFromPtr(pathname), @intFromPtr(statbuf)); - } else { - return syscall2(.stat, @intFromPtr(pathname), @intFromPtr(statbuf)); - } -} - -pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize { - if (native_arch == .riscv32 or native_arch.isLoongArch()) { - // riscv32 and loongarch have made the interesting decision to not implement some of - // the older stat syscalls, including this one. - @compileError("No lstat syscall on this architecture."); - } else if (@hasField(SYS, "lstat64")) { - return syscall2(.lstat64, @intFromPtr(pathname), @intFromPtr(statbuf)); - } else { - return syscall2(.lstat, @intFromPtr(pathname), @intFromPtr(statbuf)); - } -} - -pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize { - if (native_arch == .riscv32 or native_arch.isLoongArch()) { - // riscv32 and loongarch have made the interesting decision to not implement some of - // the older stat syscalls, including this one. 
- @compileError("No fstatat syscall on this architecture."); - } else if (@hasField(SYS, "fstatat64")) { - return syscall4(.fstatat64, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags); - } else { - return syscall4(.fstatat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags); - } -} - -pub fn statx(dirfd: i32, path: [*:0]const u8, flags: u32, mask: u32, statx_buf: *Statx) usize { +pub fn statx(dirfd: i32, path: [*:0]const u8, flags: u32, mask: STATX, statx_buf: *Statx) usize { return syscall5( .statx, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, - mask, + @as(u32, @bitCast(mask)), @intFromPtr(statx_buf), ); } @@ -6940,96 +6891,161 @@ pub const utsname = extern struct { }; pub const HOST_NAME_MAX = 64; -pub const STATX_TYPE = 0x0001; -pub const STATX_MODE = 0x0002; -pub const STATX_NLINK = 0x0004; -pub const STATX_UID = 0x0008; -pub const STATX_GID = 0x0010; -pub const STATX_ATIME = 0x0020; -pub const STATX_MTIME = 0x0040; -pub const STATX_CTIME = 0x0080; -pub const STATX_INO = 0x0100; -pub const STATX_SIZE = 0x0200; -pub const STATX_BLOCKS = 0x0400; -pub const STATX_BASIC_STATS = 0x07ff; - -pub const STATX_BTIME = 0x0800; - -pub const STATX_ATTR_COMPRESSED = 0x0004; -pub const STATX_ATTR_IMMUTABLE = 0x0010; -pub const STATX_ATTR_APPEND = 0x0020; -pub const STATX_ATTR_NODUMP = 0x0040; -pub const STATX_ATTR_ENCRYPTED = 0x0800; -pub const STATX_ATTR_AUTOMOUNT = 0x1000; +/// Flags used to request specific members in `Statx` be filled out. +/// The `Statx.mask` member will be updated with what information the kernel +/// returned. Callers must check this field since support varies by kernel +/// version and filesystem. +pub const STATX = packed struct(u32) { + /// Want `mode & S.IFMT`. + TYPE: bool = false, + /// Want `mode & ~S.IFMT`. + MODE: bool = false, + /// Want the `nlink` member. + NLINK: bool = false, + /// Want the `uid` member. + UID: bool = false, + /// Want the `gid` member. + GID: bool = false, + /// Want the `atime` member. + ATIME: bool = false, + /// Want the `mtime` member. + MTIME: bool = false, + /// Want the `ctime` member. + CTIME: bool = false, + /// Want the `ino` member. + INO: bool = false, + /// Want the `size` member. + SIZE: bool = false, + /// Want the `blocks` member. + BLOCKS: bool = false, + /// Want the `btime` member. + BTIME: bool = false, + /// Want the `mnt_id` member. + MNT_ID: bool = false, + /// Want the `dio_mem_align` and `dio_offset_align` members. + DIOALIGN: bool = false, + /// Want the `stx_mnt_id` member. + MNT_ID_UNIQUE: bool = false, + /// Want the `sub` member. + SUBVOL: bool = false, + /// Want the `atomic_write_unit_min`, `atomic_write_unit_max` and + /// `atomic_write_segments_max` members. + WRITE_ATOMIC: bool = false, + /// Want the `dio_read_offset_align` member. + DIO_READ_ALIGN: bool = false, + __pad: u13 = 0, + /// Reserved for future expansion; must not be set. + __RESERVED: bool = false, + + pub const BASIC_STATS: STATX = @bitCast(@as(u32, 0x7ff)); +}; + +/// Attributes about the state or features of a file as a bitmask. +/// Flags marked [I] correspond to the `FS_IOC_SETFLAGS` values semantically. +/// See [FS_IOC_SETFLAGS(2const)](https://man7.org/linux/man-pages/man2/FS_IOC_GETFLAGS.2const.html) +/// for more. +pub const STATX_ATTR = packed struct(u64) { + __pad1: u3 = 0, + /// [I] File is compressed by the fs. + COMPRESSED: bool = false, + __pad2: u1 = 0, + /// [I] File is marked immutable. 
+ IMMUTABLE: bool = false, + /// [I] File is append-only. + APPEND: bool = false, + /// [I] File is not to be dumped. + NODUMP: bool = false, + /// [I] File requires a key to decrypt in the filesystem. + ENCRYPTED: bool = false, + /// File names a directory that triggers an automount. + AUTOMOUNT: bool = false, + /// File names the root of a mount. + MOUNT_ROOT: bool = false, + /// [I] File is protected by the `dm-verity` device. + VERITY: bool = false, + /// File is currently in the CPU direct access state. + /// Does not correspond to the per-inode DAX flag that some filesystems support. + DAX: bool = false, + /// File supports atomic write operations. + WRITE_ATOMIC: bool = false, + __pad3: u50 = 0, +}; pub const statx_timestamp = extern struct { + /// Number of seconds before or after `1970-01-01T00:00:00Z`. sec: i64, + /// Number of nanoseconds (0..999,999,999) after `sec`. nsec: u32, + // Reserved for future increases in resolution. __pad1: u32, }; /// Renamed to `Statx` to not conflict with the `statx` function. pub const Statx = extern struct { - /// Mask of bits indicating filled fields - mask: u32, - - /// Block size for filesystem I/O + /// Mask of bits indicating filled fields. + mask: STATX, + /// Block size for filesystem I/O. blksize: u32, - - /// Extra file attribute indicators - attributes: u64, - - /// Number of hard links + /// Extra file attribute indicators. + attributes: STATX_ATTR, + /// Number of hard links. nlink: u32, - - /// User ID of owner + /// User ID of owner. uid: uid_t, - - /// Group ID of owner + /// Group ID of owner. gid: gid_t, - - /// File type and mode + /// File type and mode. mode: u16, - __pad1: u16, - - /// Inode number + __spare0: u16, + /// Inode number. ino: u64, - - /// Total size in bytes + /// Total size in bytes. size: u64, - - /// Number of 512B blocks allocated + /// Number of 512B blocks allocated. blocks: u64, - /// Mask to show what's supported in `attributes`. - attributes_mask: u64, - - /// Last access file timestamp + attributes_mask: STATX_ATTR, + /// Last access file timestamp. atime: statx_timestamp, - - /// Creation file timestamp + /// Creation file timestamp. btime: statx_timestamp, - - /// Last status change file timestamp + /// Last status change file timestamp. ctime: statx_timestamp, - - /// Last modification file timestamp + /// Last modification file timestamp. mtime: statx_timestamp, - /// Major ID, if this file represents a device. rdev_major: u32, - /// Minor ID, if this file represents a device. rdev_minor: u32, - /// Major ID of the device containing the filesystem where this file resides. dev_major: u32, - /// Minor ID of the device containing the filesystem where this file resides. dev_minor: u32, - - __pad2: [14]u64, -}; + /// Mount ID + mnt_id: u64, + /// Memory buffer alignment for direct I/O. + dio_mem_align: u32, + /// File offset alignment for direct I/O. + dio_offset_align: u32, + /// Subvolume identifier. + subvol: u64, + /// Min atomic write unit in bytes. + atomic_write_unit_min: u32, + /// Max atomic write unit in bytes. + atomic_write_unit_max: u32, + /// Max atomic write segment count. + atomic_write_segments_max: u32, + /// File offset alignment for direct I/O reads. + dio_read_offset_align: u32, + /// Optimised max atomic write unit in bytes. 
+ atomic_write_unit_max_opt: u32, + __spare2: [1]u32, + __spare3: [8]u64, +}; + +comptime { + assert(@sizeOf(Statx) == 0x100); +} pub const addrinfo = extern struct { flags: AI, @@ -9970,6 +9986,46 @@ pub const wrapped = struct { } } + pub const StatxError = std.posix.UnexpectedError || error{ + /// Search permission is denied for one of the directories in `path`. + AccessDenied, + /// Too many symbolic links were encountered traversing `path`. + SymLinkLoop, + /// `path` is too long. + NameTooLong, + /// One of: + /// - A component of `path` does not exist. + /// - A component of `path` is not a directory. + /// - `path` is a relative and `dirfd` is not a directory file descriptor. + FileNotFound, + /// Insufficient memory is available. + SystemResources, + }; + + pub fn statx(dirfd: fd_t, path: [*:0]const u8, flags: u32, mask: STATX) StatxError!Statx { + const use_c = std.c.versionCheck(if (builtin.abi.isAndroid()) + .{ .major = 30, .minor = 0, .patch = 0 } + else + .{ .major = 2, .minor = 28, .patch = 0 }); + const sys = if (use_c) std.c else std.os.linux; + + var stx = std.mem.zeroes(Statx); + const rc = sys.statx(dirfd, path, flags, mask, &stx); + return switch (sys.errno(rc)) { + .SUCCESS => stx, + .ACCES => error.AccessDenied, + .BADF => invalidApiUsage(), + .FAULT => invalidApiUsage(), + .INVAL => invalidApiUsage(), + .LOOP => error.SymLinkLoop, + .NAMETOOLONG => error.NameTooLong, + .NOENT => error.FileNotFound, + .NOTDIR => error.FileNotFound, + .NOMEM => error.SystemResources, + else => |err| unexpectedErrno(err), + }; + } + const unexpectedErrno = std.posix.unexpectedErrno; fn invalidApiUsage() error{Unexpected} { diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index 0eebbac1d0..c927dab376 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -958,7 +958,7 @@ pub fn statx( fd: linux.fd_t, path: [:0]const u8, flags: u32, - mask: u32, + mask: linux.STATX, buf: *linux.Statx, ) !*linux.io_uring_sqe { const sqe = try self.get_sqe(); @@ -2691,7 +2691,7 @@ test "statx" { tmp.dir.fd, path, 0, - linux.STATX_SIZE, + .{ .SIZE = true }, &buf, ); try testing.expectEqual(linux.IORING_OP.STATX, sqe.opcode); @@ -2718,7 +2718,7 @@ test "statx" { .flags = 0, }, cqe); - try testing.expect(buf.mask & linux.STATX_SIZE == linux.STATX_SIZE); + try testing.expect(buf.mask.SIZE); try testing.expectEqual(@as(u64, 6), buf.size); } diff --git a/lib/std/os/linux/aarch64.zig b/lib/std/os/linux/aarch64.zig index 4977593ef5..ddf1a61a25 100644 --- a/lib/std/os/linux/aarch64.zig +++ b/lib/std/os/linux/aarch64.zig @@ -143,43 +143,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6.39"; }; -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad: u64, - size: off_t, - blksize: blksize_t, - __pad2: i32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/arm.zig b/lib/std/os/linux/arm.zig index 0a5b25f9f0..0bcdaaf319 100644 --- a/lib/std/os/linux/arm.zig +++ b/lib/std/os/linux/arm.zig @@ -179,43 +179,4 @@ pub const HWCAP = struct { pub const EVTSTRM = 1 << 21; }; -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - __dev_padding: u32, - __ino_truncated: u32, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __rdev_padding: u32, - size: off_t, - blksize: blksize_t, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - ino: ino_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/hexagon.zig b/lib/std/os/linux/hexagon.zig index d3b4149d65..ff5331467d 100644 --- a/lib/std/os/linux/hexagon.zig +++ b/lib/std/os/linux/hexagon.zig @@ -119,45 +119,6 @@ pub fn clone() callconv(.naked) u32 { ); } -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad: u32, - size: off_t, - blksize: blksize_t, - __pad2: i32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const VDSO = void; diff --git a/lib/std/os/linux/io_uring_sqe.zig b/lib/std/os/linux/io_uring_sqe.zig index 5658206a66..808276170a 100644 --- a/lib/std/os/linux/io_uring_sqe.zig +++ b/lib/std/os/linux/io_uring_sqe.zig @@ -420,10 +420,10 @@ pub const io_uring_sqe = extern struct { fd: linux.fd_t, path: [*:0]const u8, flags: u32, - mask: u32, + mask: linux.STATX, buf: *linux.Statx, ) void { - sqe.prep_rw(.STATX, fd, @intFromPtr(path), mask, @intFromPtr(buf)); + sqe.prep_rw(.STATX, fd, @intFromPtr(path), @as(u32, @bitCast(mask)), @intFromPtr(buf)); sqe.rw_flags = flags; } diff --git a/lib/std/os/linux/loongarch64.zig b/lib/std/os/linux/loongarch64.zig index 41450c9976..8e7677f276 100644 --- a/lib/std/os/linux/loongarch64.zig +++ b/lib/std/os/linux/loongarch64.zig @@ -125,46 +125,7 @@ pub fn clone() callconv(.naked) u64 { ); } -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u32; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - _pad1: u64, - size: off_t, - blksize: blksize_t, - _pad2: i32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - _pad3: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const VDSO = struct { pub const CGT_SYM = "__vdso_clock_gettime"; diff --git a/lib/std/os/linux/m68k.zig b/lib/std/os/linux/m68k.zig index 29d9adf1f7..2c5f13aa63 100644 --- a/lib/std/os/linux/m68k.zig +++ b/lib/std/os/linux/m68k.zig @@ -142,45 +142,7 @@ pub fn restore_rt() callconv(.naked) noreturn { ); } -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -pub const Stat = extern struct { - dev: dev_t, - __pad: i16, - __ino_truncated: i32, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad2: i16, - size: off_t, - blksize: blksize_t, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - ino: ino_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; // No VDSO used as of glibc 112a0ae18b831bf31f44d81b82666980312511d6. 
pub const VDSO = void; diff --git a/lib/std/os/linux/mips.zig b/lib/std/os/linux/mips.zig index 3bd7790bd5..bcca7d1ef2 100644 --- a/lib/std/os/linux/mips.zig +++ b/lib/std/os/linux/mips.zig @@ -230,43 +230,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6"; }; -pub const blksize_t = u32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat64` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - __pad0: [2]u32, // -1 because our dev_t is u64 (kernel dev_t is really u32). - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad1: [2]u32, - size: off_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/mips64.zig b/lib/std/os/linux/mips64.zig index 82ad6184f1..ec5681b513 100644 --- a/lib/std/os/linux/mips64.zig +++ b/lib/std/os/linux/mips64.zig @@ -184,55 +184,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6"; }; -pub const blksize_t = u32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - __pad0: [2]u32, // -1 because our dev_t is u64 (kernel dev_t is really u32). - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad1: [2]u32, // -1 because our dev_t is u64 (kernel dev_t is really u32). - size: off_t, - atim: u32, - atim_nsec: u32, - mtim: u32, - mtim_nsec: u32, - ctim: u32, - ctim_nsec: u32, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.atim, - .nsec = self.atim_nsec, - }; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.mtim, - .nsec = self.mtim_nsec, - }; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.ctim, - .nsec = self.ctim_nsec, - }; - } -}; diff --git a/lib/std/os/linux/mipsn32.zig b/lib/std/os/linux/mipsn32.zig index 584edf7c80..4f7c7d60fd 100644 --- a/lib/std/os/linux/mipsn32.zig +++ b/lib/std/os/linux/mipsn32.zig @@ -184,55 +184,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6"; }; -pub const blksize_t = u32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - __pad0: [2]u32, // -1 because our dev_t is u64 (kernel dev_t is really u32). - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad1: [2]u32, // -1 because our dev_t is u64 (kernel dev_t is really u32). 
- size: off_t, - atim: u32, - atim_nsec: u32, - mtim: u32, - mtim_nsec: u32, - ctim: u32, - ctim_nsec: u32, - blksize: blksize_t, - __pad3: u32, - blocks: blkcnt_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.atim, - .nsec = self.atim_nsec, - }; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.mtim, - .nsec = self.mtim_nsec, - }; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return .{ - .sec = self.ctim, - .nsec = self.ctim_nsec, - }; - } -}; diff --git a/lib/std/os/linux/or1k.zig b/lib/std/os/linux/or1k.zig index 1054e52d19..45352e4791 100644 --- a/lib/std/os/linux/or1k.zig +++ b/lib/std/os/linux/or1k.zig @@ -131,43 +131,4 @@ pub fn clone() callconv(.naked) u32 { pub const VDSO = void; -pub const blksize_t = u32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat64` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - _pad0: [2]u32, - size: off_t, - blksize: blksize_t, - _pad1: u32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - _pad2: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/powerpc.zig b/lib/std/os/linux/powerpc.zig index d96a9d67f0..78c6aadda7 100644 --- a/lib/std/os/linux/powerpc.zig +++ b/lib/std/os/linux/powerpc.zig @@ -269,42 +269,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6.15"; }; -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __rdev_padding: i16, - size: off_t, - blksize: blksize_t, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/powerpc64.zig b/lib/std/os/linux/powerpc64.zig index d1663feaa8..f4375a4545 100644 --- a/lib/std/os/linux/powerpc64.zig +++ b/lib/std/os/linux/powerpc64.zig @@ -254,41 +254,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6.15"; }; -pub const blksize_t = i64; -pub const nlink_t = u64; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - nlink: nlink_t, - mode: mode_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - size: off_t, - blksize: blksize_t, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [3]u64, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/riscv32.zig b/lib/std/os/linux/riscv32.zig index 34f73506a1..c1b216b38e 100644 --- a/lib/std/os/linux/riscv32.zig +++ b/lib/std/os/linux/riscv32.zig @@ -124,46 +124,7 @@ pub fn clone() callconv(.naked) u32 { ); } -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad: u32, - size: off_t, - blksize: blksize_t, - __pad2: i32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const VDSO = struct { pub const CGT_SYM = "__vdso_clock_gettime"; diff --git a/lib/std/os/linux/riscv64.zig b/lib/std/os/linux/riscv64.zig index e404693df0..50a456836a 100644 --- a/lib/std/os/linux/riscv64.zig +++ b/lib/std/os/linux/riscv64.zig @@ -124,46 +124,7 @@ pub fn clone() callconv(.naked) u64 { ); } -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __pad: u64, - size: off_t, - blksize: blksize_t, - __pad2: i32, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [2]u32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const VDSO = struct { pub const CGT_SYM = "__vdso_clock_gettime"; diff --git a/lib/std/os/linux/s390x.zig b/lib/std/os/linux/s390x.zig index 0a09982f2a..17a558e83d 100644 --- a/lib/std/os/linux/s390x.zig +++ b/lib/std/os/linux/s390x.zig @@ -152,44 +152,7 @@ pub fn restore_rt() callconv(.naked) noreturn { ); } -pub const blksize_t = i64; -pub const nlink_t = u64; pub const time_t = i64; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - nlink: nlink_t, - mode: mode_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - size: off_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - blksize: blksize_t, - blocks: blkcnt_t, - __unused: [3]c_ulong, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const VDSO = struct { pub const CGT_SYM = "__kernel_clock_gettime"; diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig index 76506dbfa3..59880bba87 100644 --- a/lib/std/os/linux/sparc64.zig +++ b/lib/std/os/linux/sparc64.zig @@ -228,46 +228,4 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6"; }; -pub const off_t = i64; -pub const ino_t = u64; pub const time_t = i64; -pub const mode_t = u32; -pub const dev_t = u64; -pub const nlink_t = u32; -pub const blksize_t = i64; -pub const blkcnt_t = i64; - -// The `stat64` definition used by the kernel. -pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - nlink: nlink_t, - _pad: i32, - - mode: mode_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - __pad0: u32, - - rdev: dev_t, - size: i64, - blksize: blksize_t, - blocks: blkcnt_t, - - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [3]u64, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index f5533a54bd..500c3f0bae 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -10,7 +10,7 @@ const expectEqual = std.testing.expectEqual; const fs = std.fs; test "fallocate" { - if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23809 + if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://codeberg.org/ziglang/zig/issues/30220 var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); @@ -84,26 +84,22 @@ test "statx" { var file = try tmp.dir.createFile(tmp_file_name, .{}); defer file.close(); - var statx_buf: linux.Statx = undefined; - switch (linux.errno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, linux.STATX_BASIC_STATS, &statx_buf))) { + var buf: linux.Statx = undefined; + switch (linux.errno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, .BASIC_STATS, &buf))) { .SUCCESS => {}, else => unreachable, } - if (builtin.cpu.arch == .riscv32 or builtin.cpu.arch.isLoongArch()) return error.SkipZigTest; // No fstatat, so the rest of the test is meaningless. 
- - var stat_buf: linux.Stat = undefined; - switch (linux.errno(linux.fstatat(file.handle, "", &stat_buf, linux.AT.EMPTY_PATH))) { - .SUCCESS => {}, - else => unreachable, - } - - try expect(stat_buf.mode == statx_buf.mode); - try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid); - try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid); - try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size); - try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize); - try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks); + const uid = linux.getuid(); + const gid = linux.getgid(); + if (buf.mask.MODE) + try expectEqual(@as(linux.mode_t, linux.S.IFREG), buf.mode & linux.S.IFMT); + if (buf.mask.UID) + try expectEqual(uid, buf.uid); + if (buf.mask.GID) + try expectEqual(gid, buf.gid); + if (buf.mask.SIZE) + try expectEqual(@as(u64, 0), buf.size); } test "user and group ids" { @@ -138,23 +134,23 @@ test "sigset_t" { // See that none are set, then set each one, see that they're all set, then // remove them all, and then see that none are set. for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(SIG, i) catch continue; + const sig = std.enums.fromInt(SIG, i) orelse continue; try expectEqual(false, linux.sigismember(&sigset, sig)); } for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(SIG, i) catch continue; + const sig = std.enums.fromInt(SIG, i) orelse continue; linux.sigaddset(&sigset, sig); } for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(SIG, i) catch continue; + const sig = std.enums.fromInt(SIG, i) orelse continue; try expectEqual(true, linux.sigismember(&sigset, sig)); } for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(SIG, i) catch continue; + const sig = std.enums.fromInt(SIG, i) orelse continue; linux.sigdelset(&sigset, sig); } for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(SIG, i) catch continue; + const sig = std.enums.fromInt(SIG, i) orelse continue; try expectEqual(false, linux.sigismember(&sigset, sig)); } } @@ -163,7 +159,7 @@ test "sigfillset" { // unlike the C library, all the signals are set in the kernel-level fillset const sigset = linux.sigfillset(); for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(linux.SIG, i) catch continue; + const sig = std.enums.fromInt(linux.SIG, i) orelse continue; try expectEqual(true, linux.sigismember(&sigset, sig)); } } @@ -171,7 +167,7 @@ test "sigfillset" { test "sigemptyset" { const sigset = linux.sigemptyset(); for (1..linux.NSIG) |i| { - const sig = std.meta.intToEnum(linux.SIG, i) catch continue; + const sig = std.enums.fromInt(linux.SIG, i) orelse continue; try expectEqual(false, linux.sigismember(&sigset, sig)); } } diff --git a/lib/std/os/linux/x32.zig b/lib/std/os/linux/x32.zig index ac596844d6..97deb7640f 100644 --- a/lib/std/os/linux/x32.zig +++ b/lib/std/os/linux/x32.zig @@ -132,14 +132,7 @@ pub fn restore_rt() callconv(.naked) noreturn { } } -pub const mode_t = u32; pub const time_t = i32; -pub const nlink_t = u32; -pub const blksize_t = i32; -pub const blkcnt_t = i32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; pub const VDSO = struct { pub const CGT_SYM = "__vdso_clock_gettime"; @@ -155,36 +148,3 @@ pub const ARCH = struct { pub const GET_FS = 0x1003; pub const GET_GS = 0x1004; }; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - nlink: nlink_t, - - mode: mode_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - __pad0: u32, - rdev: dev_t, - size: off_t, - blksize: blksize_t, - blocks: i64, - - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [3]i32, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/linux/x86.zig b/lib/std/os/linux/x86.zig index a68a4af317..e95afe23a6 100644 --- a/lib/std/os/linux/x86.zig +++ b/lib/std/os/linux/x86.zig @@ -195,46 +195,7 @@ pub const VDSO = struct { pub const CGT_VER = "LINUX_2.6"; }; -pub const blksize_t = i32; -pub const nlink_t = u32; pub const time_t = i32; -pub const mode_t = u32; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; -pub const blkcnt_t = i64; - -// The `stat` definition used by the Linux kernel. -pub const Stat = extern struct { - dev: dev_t, - __dev_padding: u32, - __ino_truncated: u32, - mode: mode_t, - nlink: nlink_t, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - rdev: dev_t, - __rdev_padding: u32, - size: off_t, - blksize: blksize_t, - blocks: blkcnt_t, - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - ino: ino_t, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; pub const user_desc = extern struct { entry_number: u32, diff --git a/lib/std/os/linux/x86_64.zig b/lib/std/os/linux/x86_64.zig index d9c2d17f09..cab71d03e0 100644 --- a/lib/std/os/linux/x86_64.zig +++ b/lib/std/os/linux/x86_64.zig @@ -132,14 +132,7 @@ pub fn restore_rt() callconv(.naked) noreturn { } } -pub const mode_t = u64; pub const time_t = i64; -pub const nlink_t = u64; -pub const blksize_t = i64; -pub const blkcnt_t = i64; -pub const off_t = i64; -pub const ino_t = u64; -pub const dev_t = u64; pub const VDSO = struct { pub const CGT_SYM = "__vdso_clock_gettime"; @@ -155,36 +148,3 @@ pub const ARCH = struct { pub const GET_FS = 0x1003; pub const GET_GS = 0x1004; }; - -// The `stat` definition used by the Linux kernel. 
-pub const Stat = extern struct { - dev: dev_t, - ino: ino_t, - nlink: u64, - - mode: u32, - uid: std.os.linux.uid_t, - gid: std.os.linux.gid_t, - __pad0: u32, - rdev: dev_t, - size: off_t, - blksize: i64, - blocks: i64, - - atim: std.os.linux.timespec, - mtim: std.os.linux.timespec, - ctim: std.os.linux.timespec, - __unused: [3]i64, - - pub fn atime(self: @This()) std.os.linux.timespec { - return self.atim; - } - - pub fn mtime(self: @This()) std.os.linux.timespec { - return self.mtim; - } - - pub fn ctime(self: @This()) std.os.linux.timespec { - return self.ctim; - } -}; diff --git a/lib/std/os/uefi/protocol/ip6_config.zig b/lib/std/os/uefi/protocol/ip6_config.zig index bb660a3b8e..0ada2a7899 100644 --- a/lib/std/os/uefi/protocol/ip6_config.zig +++ b/lib/std/os/uefi/protocol/ip6_config.zig @@ -44,7 +44,7 @@ pub const Ip6Config = extern struct { pub fn setData( self: *const Ip6Config, comptime data_type: std.meta.Tag(DataType), - payload: *const std.meta.TagPayload(DataType, data_type), + payload: *const @FieldType(DataType, @tagName(data_type)), ) SetDataError!void { const data_size = @sizeOf(@TypeOf(payload)); switch (self._set_data(self, data_type, data_size, @ptrCast(payload))) { @@ -64,8 +64,8 @@ pub const Ip6Config = extern struct { pub fn getData( self: *const Ip6Config, comptime data_type: std.meta.Tag(DataType), - ) GetDataError!std.meta.TagPayload(DataType, data_type) { - const DataPayload = std.meta.TagPayload(DataType, data_type); + ) GetDataError!@FieldType(DataType, @tagName(data_type)) { + const DataPayload = @FieldType(DataType, @tagName(data_type)); var payload: DataPayload = undefined; var payload_size: usize = @sizeOf(DataPayload); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index c91ce8b245..dd41879b3b 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -28,9 +28,2265 @@ pub const ws2_32 = @import("windows/ws2_32.zig"); pub const crypt32 = @import("windows/crypt32.zig"); pub const nls = @import("windows/nls.zig"); -pub const self_process_handle = @as(HANDLE, @ptrFromInt(maxInt(usize))); +pub const FILE = struct { + // ref: km/ntddk.h -const Self = @This(); + pub const END_OF_FILE_INFORMATION = extern struct { + EndOfFile: LARGE_INTEGER, + }; + + pub const ALIGNMENT_INFORMATION = extern struct { + AlignmentRequirement: ULONG, + }; + + pub const NAME_INFORMATION = extern struct { + FileNameLength: ULONG, + FileName: [1]WCHAR, + }; + + pub const DISPOSITION = packed struct(ULONG) { + DELETE: bool = false, + POSIX_SEMANTICS: bool = false, + FORCE_IMAGE_SECTION_CHECK: bool = false, + ON_CLOSE: bool = false, + IGNORE_READONLY_ATTRIBUTE: bool = false, + Reserved5: u27 = 0, + + pub const DO_NOT_DELETE: DISPOSITION = .{}; + + pub const INFORMATION = extern struct { + DeleteFile: BOOLEAN, + + pub const EX = extern struct { + Flags: DISPOSITION, + }; + }; + }; + + pub const FS_VOLUME_INFORMATION = extern struct { + VolumeCreationTime: LARGE_INTEGER, + VolumeSerialNumber: ULONG, + VolumeLabelLength: ULONG, + SupportsObjects: BOOLEAN, + VolumeLabel: [0]WCHAR, + + pub fn getVolumeLabel(fvi: *const FS_VOLUME_INFORMATION) []const WCHAR { + return (&fvi).ptr[0..@divExact(fvi.VolumeLabelLength, @sizeOf(WCHAR))]; + } + }; + + // ref: km/ntifs.h + + pub const PIPE = struct { + /// Define the `NamedPipeType` flags for `NtCreateNamedPipeFile` + pub const TYPE = packed struct(ULONG) { + TYPE: enum(u1) { + BYTE_STREAM = 0b0, + MESSAGE = 0b1, + } = .BYTE_STREAM, + REMOTE_CLIENTS: enum(u1) { + ACCEPT = 0b0, + REJECT = 0b1, + } = .ACCEPT, + Reserved2: u30 = 0, + 
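// Not part of the patch: a small sketch of how these packed flag structs map onto the
// raw ULONG value the NT API expects. Zig packed structs lay fields out starting at the
// least significant bit, so with TYPE in bit 0 and REMOTE_CLIENTS in bit 1, a
// message-mode pipe that rejects remote clients bit-casts to 0b11, which is exactly
// what VALID_MASK below encodes.
comptime {
    const mask: @This() = .{ .TYPE = .MESSAGE, .REMOTE_CLIENTS = .REJECT };
    std.debug.assert(@as(ULONG, @bitCast(mask)) == 0b11);
}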
+ pub const VALID_MASK: TYPE = .{ + .TYPE = .MESSAGE, + .REMOTE_CLIENTS = .REJECT, + }; + }; + + /// Define the `CompletionMode` flags for `NtCreateNamedPipeFile` + pub const COMPLETION_MODE = packed struct(ULONG) { + OPERATION: enum(u1) { + QUEUE = 0b0, + COMPLETE = 0b1, + } = .QUEUE, + Reserved1: u31 = 0, + }; + + /// Define the `ReadMode` flags for `NtCreateNamedPipeFile` + pub const READ_MODE = packed struct(ULONG) { + MODE: enum(u1) { + BYTE_STREAM = 0b0, + MESSAGE = 0b1, + }, + Reserved1: u31 = 0, + }; + + /// Define the `NamedPipeConfiguration` flags for `NtQueryInformationFile` + pub const CONFIGURATION = enum(ULONG) { + INBOUND = 0x00000000, + OUTBOUND = 0x00000001, + FULL_DUPLEX = 0x00000002, + }; + + /// Define the `NamedPipeState` flags for `NtQueryInformationFile` + pub const STATE = enum(ULONG) { + DISCONNECTED = 0x00000001, + LISTENING = 0x00000002, + CONNECTED = 0x00000003, + CLOSING = 0x00000004, + }; + + /// Define the `NamedPipeEnd` flags for `NtQueryInformationFile` + pub const END = enum(ULONG) { + CLIENT = 0x00000000, + SERVER = 0x00000001, + }; + + pub const INFORMATION = extern struct { + ReadMode: READ_MODE, + CompletionMode: COMPLETION_MODE, + }; + + pub const LOCAL_INFORMATION = extern struct { + NamedPipeType: TYPE, + NamedPipeConfiguration: CONFIGURATION, + MaximumInstances: ULONG, + CurrentInstances: ULONG, + InboundQuota: ULONG, + ReadDataAvailable: ULONG, + OutboundQuota: ULONG, + WriteQuotaAvailable: ULONG, + NamedPipeState: STATE, + NamedPipeEnd: END, + }; + + pub const REMOTE_INFORMATION = extern struct { + CollectDataTime: LARGE_INTEGER, + MaximumCollectionCount: ULONG, + }; + + pub const WAIT_FOR_BUFFER = extern struct { + Timeout: LARGE_INTEGER, + NameLength: ULONG, + TimeoutSpecified: BOOLEAN, + Name: [PATH_MAX_WIDE]WCHAR, + + pub const WAIT_FOREVER: LARGE_INTEGER = std.math.minInt(LARGE_INTEGER); + + pub fn init(opts: struct { + Timeout: ?LARGE_INTEGER = null, + Name: []const WCHAR, + }) WAIT_FOR_BUFFER { + var fpwfb: WAIT_FOR_BUFFER = .{ + .Timeout = opts.Timeout orelse undefined, + .NameLength = @intCast(@sizeOf(WCHAR) * opts.Name.len), + .TimeoutSpecified = @intFromBool(opts.Timeout != null), + .Name = undefined, + }; + @memcpy(fpwfb.Name[0..opts.Name.len], opts.Name); + return fpwfb; + } + + pub fn getName(fpwfb: *const WAIT_FOR_BUFFER) []const WCHAR { + return fpwfb.Name[0..@divExact(fpwfb.NameLength, @sizeOf(WCHAR))]; + } + + pub fn toBuffer(fpwfb: *const WAIT_FOR_BUFFER) []const u8 { + const start: [*]const u8 = @ptrCast(fpwfb); + return start[0 .. 
@offsetOf(WAIT_FOR_BUFFER, "Name") + fpwfb.NameLength]; + } + }; + }; + + pub const ALL_INFORMATION = extern struct { + BasicInformation: BASIC_INFORMATION, + StandardInformation: STANDARD_INFORMATION, + InternalInformation: INTERNAL_INFORMATION, + EaInformation: EA_INFORMATION, + AccessInformation: ACCESS_INFORMATION, + PositionInformation: POSITION_INFORMATION, + ModeInformation: MODE.INFORMATION, + AlignmentInformation: ALIGNMENT_INFORMATION, + NameInformation: NAME_INFORMATION, + }; + + pub const INTERNAL_INFORMATION = extern struct { + IndexNumber: LARGE_INTEGER, + }; + + pub const EA_INFORMATION = extern struct { + EaSize: ULONG, + }; + + pub const ACCESS_INFORMATION = extern struct { + AccessFlags: ACCESS_MASK, + }; + + pub const RENAME_INFORMATION = extern struct { + Flags: FLAGS, + RootDirectory: ?HANDLE, + FileNameLength: ULONG, + FileName: [PATH_MAX_WIDE]WCHAR, + + pub fn init(opts: struct { + Flags: FLAGS = .{}, + RootDirectory: ?HANDLE = null, + FileName: []const WCHAR, + }) RENAME_INFORMATION { + var fri: RENAME_INFORMATION = .{ + .Flags = opts.Flags, + .RootDirectory = opts.RootDirectory, + .FileNameLength = @intCast(@sizeOf(WCHAR) * opts.FileName.len), + .FileName = undefined, + }; + @memcpy(fri.FileName[0..opts.FileName.len], opts.FileName); + return fri; + } + + pub const FLAGS = packed struct(ULONG) { + REPLACE_IF_EXISTS: bool = false, + POSIX_SEMANTICS: bool = false, + SUPPRESS_PIN_STATE_INHERITANCE: bool = false, + SUPPRESS_STORAGE_RESERVE_INHERITANCE: bool = false, + AVAILABLE_SPACE: enum(u2) { + NO_PRESERVE = 0b00, + NO_INCREASE = 0b01, + NO_DECREASE = 0b10, + PRESERVE = 0b11, + } = .NO_PRESERVE, + IGNORE_READONLY_ATTRIBUTE: bool = false, + RESIZE_SR: enum(u2) { + NO_FORCE = 0b00, + FORCE_TARGET = 0b01, + FORCE_SOURCE = 0b10, + FORCE = 0b11, + } = .NO_FORCE, + Reserved9: u23 = 0, + }; + + pub fn getFileName(ri: *const RENAME_INFORMATION) []const WCHAR { + return ri.FileName[0..@divExact(ri.FileNameLength, @sizeOf(WCHAR))]; + } + + pub fn toBuffer(fri: *const RENAME_INFORMATION) []const u8 { + const start: [*]const u8 = @ptrCast(fri); + return start[0 .. 
@offsetOf(RENAME_INFORMATION, "FileName") + fri.FileNameLength]; + } + }; + + // ref: km/wdm.h + + pub const INFORMATION_CLASS = enum(c_int) { + Directory = 1, + FullDirectory = 2, + BothDirectory = 3, + Basic = 4, + Standard = 5, + Internal = 6, + Ea = 7, + Access = 8, + Name = 9, + Rename = 10, + Link = 11, + Names = 12, + Disposition = 13, + Position = 14, + FullEa = 15, + Mode = 16, + Alignment = 17, + All = 18, + Allocation = 19, + EndOfFile = 20, + AlternateName = 21, + Stream = 22, + Pipe = 23, + PipeLocal = 24, + PipeRemote = 25, + MailslotQuery = 26, + MailslotSet = 27, + Compression = 28, + ObjectId = 29, + Completion = 30, + MoveCluster = 31, + Quota = 32, + ReparsePoint = 33, + NetworkOpen = 34, + AttributeTag = 35, + Tracking = 36, + IdBothDirectory = 37, + IdFullDirectory = 38, + ValidDataLength = 39, + ShortName = 40, + IoCompletionNotification = 41, + IoStatusBlockRange = 42, + IoPriorityHint = 43, + SfioReserve = 44, + SfioVolume = 45, + HardLink = 46, + ProcessIdsUsingFile = 47, + NormalizedName = 48, + NetworkPhysicalName = 49, + IdGlobalTxDirectory = 50, + IsRemoteDevice = 51, + Unused = 52, + NumaNode = 53, + StandardLink = 54, + RemoteProtocol = 55, + RenameBypassAccessCheck = 56, + LinkBypassAccessCheck = 57, + VolumeName = 58, + Id = 59, + IdExtdDirectory = 60, + ReplaceCompletion = 61, + HardLinkFullId = 62, + IdExtdBothDirectory = 63, + DispositionEx = 64, + RenameEx = 65, + RenameExBypassAccessCheck = 66, + DesiredStorageClass = 67, + Stat = 68, + MemoryPartition = 69, + StatLx = 70, + CaseSensitive = 71, + LinkEx = 72, + LinkExBypassAccessCheck = 73, + StorageReserveId = 74, + CaseSensitiveForceAccessCheck = 75, + KnownFolder = 76, + StatBasic = 77, + Id64ExtdDirectory = 78, + Id64ExtdBothDirectory = 79, + IdAllExtdDirectory = 80, + IdAllExtdBothDirectory = 81, + StreamReservation = 82, + MupProvider = 83, + + pub const Maximum: @typeInfo(@This()).@"enum".tag_type = 1 + @typeInfo(@This()).@"enum".fields.len; + }; + + pub const BASIC_INFORMATION = extern struct { + CreationTime: LARGE_INTEGER, + LastAccessTime: LARGE_INTEGER, + LastWriteTime: LARGE_INTEGER, + ChangeTime: LARGE_INTEGER, + FileAttributes: ATTRIBUTE, + }; + + pub const STANDARD_INFORMATION = extern struct { + AllocationSize: LARGE_INTEGER, + EndOfFile: LARGE_INTEGER, + NumberOfLinks: ULONG, + DeletePending: BOOLEAN, + Directory: BOOLEAN, + }; + + pub const POSITION_INFORMATION = extern struct { + CurrentByteOffset: LARGE_INTEGER, + }; + + pub const FS_DEVICE_INFORMATION = extern struct { + DeviceType: DEVICE_TYPE, + Characteristics: ULONG, + }; + + // ref: um/WinBase.h + + pub const ATTRIBUTE_TAG_INFO = extern struct { + FileAttributes: DWORD, + ReparseTag: IO_REPARSE_TAG, + }; + + // ref: um/winnt.h + + pub const SHARE = packed struct(ULONG) { + /// The file can be opened for read access by other threads. + READ: bool = false, + /// The file can be opened for write access by other threads. + WRITE: bool = false, + /// The file can be opened for delete access by other threads. + DELETE: bool = false, + Reserved3: u29 = 0, + + pub const VALID_FLAGS: SHARE = .{ + .READ = true, + .WRITE = true, + .DELETE = true, + }; + }; + + pub const ATTRIBUTE = packed struct(ULONG) { + /// The file is read only. Applications can read the file, but cannot write to or delete it. + READONLY: bool = false, + /// The file is hidden. Do not include it in an ordinary directory listing. + HIDDEN: bool = false, + /// The file is part of or used exclusively by an operating system. 
+ SYSTEM: bool = false, + Reserved3: u1 = 0, + DIRECTORY: bool = false, + /// The file should be archived. Applications use this attribute to mark files for backup or removal. + ARCHIVE: bool = false, + DEVICE: bool = false, + /// The file does not have other attributes set. This attribute is valid only if used alone. + NORMAL: bool = false, + /// The file is being used for temporary storage. + TEMPORARY: bool = false, + SPARSE_FILE: bool = false, + REPARSE_POINT: bool = false, + COMPRESSED: bool = false, + /// The data of a file is not immediately available. This attribute indicates that file data is physically moved to offline storage. + /// This attribute is used by Remote Storage, the hierarchical storage management software. Applications should not arbitrarily change this attribute. + OFFLINE: bool = false, + NOT_CONTENT_INDEXED: bool = false, + /// The file or directory is encrypted. For a file, this means that all data in the file is encrypted. For a directory, this means that encryption is + /// the default for newly created files and subdirectories. For more information, see File Encryption. + /// + /// This flag has no effect if `SYSTEM` is also specified. + /// + /// This flag is not supported on Home, Home Premium, Starter, or ARM editions of Windows. + ENCRYPTED: bool = false, + INTEGRITY_STREAM: bool = false, + VIRTUAL: bool = false, + NO_SCRUB_DATA: bool = false, + EA_or_RECALL_ON_OPEN: bool = false, + PINNED: bool = false, + UNPINNED: bool = false, + Reserved21: u1 = 0, + RECALL_ON_DATA_ACCESS: bool = false, + Reserved23: u6 = 0, + STRICTLY_SEQUENTIAL: bool = false, + Reserved30: u2 = 0, + }; + + // ref: um/winternl.h + + /// Define the create disposition values + pub const CREATE_DISPOSITION = enum(ULONG) { + /// If the file already exists, replace it with the given file. If it does not, create the given file. + SUPERSEDE = 0x00000000, + /// If the file already exists, open it instead of creating a new file. If it does not, fail the request and do not create a new file. + OPEN = 0x00000001, + /// If the file already exists, fail the request and do not create or open the given file. If it does not, create the given file. + CREATE = 0x00000002, + /// If the file already exists, open it. If it does not, create the given file. + OPEN_IF = 0x00000003, + /// If the file already exists, open it and overwrite it. If it does not, fail the request. + OVERWRITE = 0x00000004, + /// If the file already exists, open it and overwrite it. If it does not, create the given file. + OVERWRITE_IF = 0x00000005, + + pub const MAXIMUM_DISPOSITION: CREATE_DISPOSITION = .OVERWRITE_IF; + }; + + /// Define the create/open option flags + pub const MODE = packed struct(ULONG) { + /// The file being created or opened is a directory file. With this flag, the CreateDisposition parameter must be set to `.CREATE`, `.FILE_OPEN`, or `.OPEN_IF`. + /// With this flag, other compatible CreateOptions flags include only the following: `SYNCHRONOUS_IO`, `WRITE_THROUGH`, `OPEN_FOR_BACKUP_INTENT`, and `OPEN_BY_FILE_ID`. + DIRECTORY_FILE: bool = false, + /// Applications that write data to the file must actually transfer the data into the file before any requested write operation is considered complete. + /// This flag is automatically set if the CreateOptions flag `NO_INTERMEDIATE_BUFFERING` is set. + WRITE_THROUGH: bool = false, + /// All accesses to the file are sequential. + SEQUENTIAL_ONLY: bool = false, + /// The file cannot be cached or buffered in a driver's internal buffers. 
This flag is incompatible with the DesiredAccess `FILE_APPEND_DATA` flag. + NO_INTERMEDIATE_BUFFERING: bool = false, + IO: enum(u2) { + /// All operations on the file are performed asynchronously. + ASYNCHRONOUS = 0b00, + /// All operations on the file are performed synchronously. Any wait on behalf of the caller is subject to premature termination from alerts. + /// This flag also causes the I/O system to maintain the file position context. If this flag is set, the DesiredAccess `SYNCHRONIZE` flag also must be set. + SYNCHRONOUS_ALERT = 0b01, + /// All operations on the file are performed synchronously. Waits in the system to synchronize I/O queuing and completion are not subject to alerts. + /// This flag also causes the I/O system to maintain the file position context. If this flag is set, the DesiredAccess `SYNCHRONIZE` flag also must be set. + SYNCHRONOUS_NONALERT = 0b10, + _, + + pub const VALID_FLAGS: @This() = @enumFromInt(0b11); + } = .ASYNCHRONOUS, + /// The file being opened must not be a directory file or this call fails. The file object being opened can represent a data file, a logical, virtual, or physical + /// device, or a volume. + NON_DIRECTORY_FILE: bool = false, + /// Create a tree connection for this file in order to open it over the network. This flag is not used by device and intermediate drivers. + CREATE_TREE_CONNECTION: bool = false, + /// Complete this operation immediately with an alternate success code of `STATUS_OPLOCK_BREAK_IN_PROGRESS` if the target file is oplocked, rather than blocking + /// the caller's thread. If the file is oplocked, another caller already has access to the file. This flag is not used by device and intermediate drivers. + COMPLETE_IF_OPLOCKED: bool = false, + /// If the extended attributes on an existing file being opened indicate that the caller must understand EAs to properly interpret the file, fail this request + /// because the caller does not understand how to deal with EAs. This flag is irrelevant for device and intermediate drivers. + NO_EA_KNOWLEDGE: bool = false, + OPEN_REMOTE_INSTANCE: bool = false, + /// Accesses to the file can be random, so no sequential read-ahead operations should be performed on the file by FSDs or the system. + RANDOM_ACCESS: bool = false, + /// Delete the file when the last handle to it is passed to `NtClose`. If this flag is set, the `DELETE` flag must be set in the DesiredAccess parameter. + DELETE_ON_CLOSE: bool = false, + /// The file name that is specified by the `ObjectAttributes` parameter includes the 8-byte file reference number for the file. This number is assigned by and + /// specific to the particular file system. If the file is a reparse point, the file name will also include the name of a device. Note that the FAT file system + /// does not support this flag. This flag is not used by device and intermediate drivers. + OPEN_BY_FILE_ID: bool = false, + /// The file is being opened for backup intent. Therefore, the system should check for certain access rights and grant the caller the appropriate access to the + /// file before checking the DesiredAccess parameter against the file's security descriptor. This flag not used by device and intermediate drivers. + OPEN_FOR_BACKUP_INTENT: bool = false, + /// Suppress inheritance of `FILE_ATTRIBUTE.COMPRESSED` from the parent directory. This allows creation of a non-compressed file in a directory that is marked + /// compressed. 
+ NO_COMPRESSION: bool = false, + /// The file is being opened and an opportunistic lock on the file is being requested as a single atomic operation. The file system checks for oplocks before it + /// performs the create operation and will fail the create with a return code of STATUS_CANNOT_BREAK_OPLOCK if the result would be to break an existing oplock. + /// For more information, see the Remarks section. + /// + /// Windows Server 2008, Windows Vista, Windows Server 2003 and Windows XP: This flag is not supported. + /// + /// This flag is supported on the following file systems: NTFS, FAT, and exFAT. + OPEN_REQUIRING_OPLOCK: bool = false, + Reserved17: u3 = 0, + /// This flag allows an application to request a filter opportunistic lock to prevent other applications from getting share violations. If there are already open + /// handles, the create request will fail with STATUS_OPLOCK_NOT_GRANTED. For more information, see the Remarks section. + RESERVE_OPFILTER: bool = false, + /// Open a file with a reparse point and bypass normal reparse point processing for the file. For more information, see the Remarks section. + OPEN_REPARSE_POINT: bool = false, + /// Instructs any filters that perform offline storage or virtualization to not recall the contents of the file as a result of this open. + OPEN_NO_RECALL: bool = false, + /// This flag instructs the file system to capture the user associated with the calling thread. Any subsequent calls to `FltQueryVolumeInformation` or + /// `ZwQueryVolumeInformationFile` using the returned handle will assume the captured user, rather than the calling user at the time, for purposes of computing + /// the free space available to the caller. This applies to the following FsInformationClass values: `FileFsSizeInformation`, `FileFsFullSizeInformation`, and + /// `FileFsFullSizeInformationEx`. + OPEN_FOR_FREE_SPACE_QUERY: bool = false, + Reserved24: u8 = 0, + + pub const VALID_OPTION_FLAGS: MODE = .{ + .DIRECTORY_FILE = true, + .WRITE_THROUGH = true, + .SEQUENTIAL_ONLY = true, + .NO_INTERMEDIATE_BUFFERING = true, + .IO = .VALID_FLAGS, + .NON_DIRECTORY_FILE = true, + .CREATE_TREE_CONNECTION = true, + .COMPLETE_IF_OPLOCKED = true, + .NO_EA_KNOWLEDGE = true, + .OPEN_REMOTE_INSTANCE = true, + .RANDOM_ACCESS = true, + .DELETE_ON_CLOSE = true, + .OPEN_BY_FILE_ID = true, + .OPEN_FOR_BACKUP_INTENT = true, + .NO_COMPRESSION = true, + .OPEN_REQUIRING_OPLOCK = true, + .Reserved17 = 0b111, + .RESERVE_OPFILTER = true, + .OPEN_REPARSE_POINT = true, + .OPEN_NO_RECALL = true, + .OPEN_FOR_FREE_SPACE_QUERY = true, + }; + + pub const VALID_PIPE_OPTION_FLAGS: MODE = .{ + .WRITE_THROUGH = true, + .IO = .VALID_FLAGS, + }; + + pub const VALID_MAILSLOT_OPTION_FLAGS: MODE = .{ + .WRITE_THROUGH = true, + .IO = .VALID_FLAGS, + }; + + pub const VALID_SET_OPTION_FLAGS: MODE = .{ + .WRITE_THROUGH = true, + .SEQUENTIAL_ONLY = true, + .IO = .VALID_FLAGS, + }; + + // ref: km/ntifs.h + + pub const INFORMATION = extern struct { + /// The set of flags that specify the mode in which the file can be accessed. These flags are a subset of `MODE`. 
+ Mode: MODE, + }; + }; +}; + +// ref: km/ntddk.h + +pub const PROCESSINFOCLASS = enum(c_int) { + BasicInformation = 0, + QuotaLimits = 1, + IoCounters = 2, + VmCounters = 3, + Times = 4, + BasePriority = 5, + RaisePriority = 6, + DebugPort = 7, + ExceptionPort = 8, + AccessToken = 9, + LdtInformation = 10, + LdtSize = 11, + DefaultHardErrorMode = 12, + IoPortHandlers = 13, + PooledUsageAndLimits = 14, + WorkingSetWatch = 15, + UserModeIOPL = 16, + EnableAlignmentFaultFixup = 17, + PriorityClass = 18, + Wx86Information = 19, + HandleCount = 20, + AffinityMask = 21, + PriorityBoost = 22, + DeviceMap = 23, + SessionInformation = 24, + ForegroundInformation = 25, + Wow64Information = 26, + ImageFileName = 27, + LUIDDeviceMapsEnabled = 28, + BreakOnTermination = 29, + DebugObjectHandle = 30, + DebugFlags = 31, + HandleTracing = 32, + IoPriority = 33, + ExecuteFlags = 34, + TlsInformation = 35, + Cookie = 36, + ImageInformation = 37, + CycleTime = 38, + PagePriority = 39, + InstrumentationCallback = 40, + ThreadStackAllocation = 41, + WorkingSetWatchEx = 42, + ImageFileNameWin32 = 43, + ImageFileMapping = 44, + AffinityUpdateMode = 45, + MemoryAllocationMode = 46, + GroupInformation = 47, + TokenVirtualizationEnabled = 48, + OwnerInformation = 49, + WindowInformation = 50, + HandleInformation = 51, + MitigationPolicy = 52, + DynamicFunctionTableInformation = 53, + HandleCheckingMode = 54, + KeepAliveCount = 55, + RevokeFileHandles = 56, + WorkingSetControl = 57, + HandleTable = 58, + CheckStackExtentsMode = 59, + CommandLineInformation = 60, + ProtectionInformation = 61, + MemoryExhaustion = 62, + FaultInformation = 63, + TelemetryIdInformation = 64, + CommitReleaseInformation = 65, + Reserved1Information = 66, + Reserved2Information = 67, + SubsystemProcess = 68, + InPrivate = 70, + RaiseUMExceptionOnInvalidHandleClose = 71, + SubsystemInformation = 75, + Win32kSyscallFilterInformation = 79, + EnergyTrackingState = 82, + NetworkIoCounters = 114, + _, + + pub const Max: @typeInfo(@This()).@"enum".tag_type = 117; +}; + +pub const THREADINFOCLASS = enum(c_int) { + BasicInformation = 0, + Times = 1, + Priority = 2, + BasePriority = 3, + AffinityMask = 4, + ImpersonationToken = 5, + DescriptorTableEntry = 6, + EnableAlignmentFaultFixup = 7, + EventPair_Reusable = 8, + QuerySetWin32StartAddress = 9, + ZeroTlsCell = 10, + PerformanceCount = 11, + AmILastThread = 12, + IdealProcessor = 13, + PriorityBoost = 14, + SetTlsArrayAddress = 15, + IsIoPending = 16, + // Windows 2000+ from here + HideFromDebugger = 17, + // Windows XP+ from here + BreakOnTermination = 18, + SwitchLegacyState = 19, + IsTerminated = 20, + // Windows Vista+ from here + LastSystemCall = 21, + IoPriority = 22, + CycleTime = 23, + PagePriority = 24, + ActualBasePriority = 25, + TebInformation = 26, + CSwitchMon = 27, + // Windows 7+ from here + CSwitchPmu = 28, + Wow64Context = 29, + GroupInformation = 30, + UmsInformation = 31, + CounterProfiling = 32, + IdealProcessorEx = 33, + // Windows 8+ from here + CpuAccountingInformation = 34, + // Windows 8.1+ from here + SuspendCount = 35, + // Windows 10+ from here + HeterogeneousCpuPolicy = 36, + ContainerId = 37, + NameInformation = 38, + SelectedCpuSets = 39, + SystemThreadInformation = 40, + ActualGroupAffinity = 41, + DynamicCodePolicyInfo = 42, + SubsystemInformation = 45, + + pub const Max: @typeInfo(@This()).@"enum".tag_type = 60; +}; + +// ref: km/ntifs.h + +pub const HEAP = opaque { + pub const FLAGS = packed struct(u8) { + /// Serialized access is not used when the heap 
functions access this heap. This option + /// applies to all subsequent heap function calls. Alternatively, you can specify this + /// option on individual heap function calls. + /// + /// The low-fragmentation heap (LFH) cannot be enabled for a heap created with this option. + /// + /// A heap created with this option cannot be locked. + NO_SERIALIZE: bool = false, + /// Specifies that the heap is growable. Must be specified if `HeapBase` is `NULL`. + GROWABLE: bool = false, + /// The system raises an exception to indicate failure (for example, an out-of-memory + /// condition) for calls to `HeapAlloc` and `HeapReAlloc` instead of returning `NULL`. + /// + /// To ensure that exceptions are generated for all calls to an allocation function, specify + /// `GENERATE_EXCEPTIONS` in the call to `HeapCreate`. In this case, it is not necessary to + /// additionally specify `GENERATE_EXCEPTIONS` in the allocation function calls. + GENERATE_EXCEPTIONS: bool = false, + /// The allocated memory will be initialized to zero. Otherwise, the memory is not + /// initialized to zero. + ZERO_MEMORY: bool = false, + REALLOC_IN_PLACE_ONLY: bool = false, + TAIL_CHECKING_ENABLED: bool = false, + FREE_CHECKING_ENABLED: bool = false, + DISABLE_COALESCE_ON_FREE: bool = false, + + pub const CLASS = enum(u4) { + /// process heap + PROCESS, + /// private heap + PRIVATE, + /// Kernel Heap + KERNEL, + /// GDI heap + GDI, + /// User heap + USER, + /// Console heap + CONSOLE, + /// User Desktop heap + USER_DESKTOP, + /// Csrss Shared heap + CSRSS_SHARED, + /// Csr Port heap + CSR_PORT, + _, + + pub const MASK: CLASS = @enumFromInt(maxInt(@typeInfo(CLASS).@"enum".tag_type)); + }; + + pub const CREATE = packed struct(ULONG) { + COMMON: FLAGS = .{}, + SEGMENT_HEAP: bool = false, + /// Only applies to segment heap. Applies pointer obfuscation which is + /// generally excessive and unnecessary but is necessary for certain insecure + /// heaps in win32k. + /// + /// Specifying HEAP_CREATE_HARDENED prevents the heap from using locks as + /// pointers would potentially be exposed in heap metadata lock variables. + /// Callers are therefore responsible for synchronizing access to hardened heaps. + HARDENED: bool = false, + Reserved10: u2 = 0, + CLASS: CLASS = @enumFromInt(0), + /// Create heap with 16 byte alignment (obsolete) + ALIGN_16: bool = false, + /// Create heap call tracing enabled (obsolete) + ENABLE_TRACING: bool = false, + /// Create heap with executable pages + /// + /// All memory blocks that are allocated from this heap allow code execution, if the + /// hardware enforces data execution prevention. Use this flag heap in applications that + /// run code from the heap. If `ENABLE_EXECUTE` is not specified and an application + /// attempts to run code from a protected page, the application receives an exception + /// with the status code `STATUS_ACCESS_VIOLATION`. 
+ ENABLE_EXECUTE: bool = false, + Reserved19: u13 = 0, + + pub const VALID_MASK: CREATE = .{ + .COMMON = .{ + .NO_SERIALIZE = true, + .GROWABLE = true, + .GENERATE_EXCEPTIONS = true, + .ZERO_MEMORY = true, + .REALLOC_IN_PLACE_ONLY = true, + .TAIL_CHECKING_ENABLED = true, + .FREE_CHECKING_ENABLED = true, + .DISABLE_COALESCE_ON_FREE = true, + }, + .CLASS = .MASK, + .ALIGN_16 = true, + .ENABLE_TRACING = true, + .ENABLE_EXECUTE = true, + .SEGMENT_HEAP = true, + .HARDENED = true, + }; + }; + + pub const ALLOCATION = packed struct(ULONG) { + COMMON: FLAGS = .{}, + SETTABLE_USER: packed struct(u4) { + VALUE: u1 = 0, + FLAGS: packed struct(u3) { + FLAG1: bool = false, + FLAG2: bool = false, + FLAG3: bool = false, + } = .{}, + } = .{}, + CLASS: CLASS = @enumFromInt(0), + Reserved16: u2 = 0, + TAG: u12 = 0, + Reserved30: u2 = 0, + }; + }; + + pub const RTL_PARAMETERS = extern struct { + Length: ULONG, + SegmentReserve: SIZE_T, + SegmentCommit: SIZE_T, + DeCommitFreeBlockThreshold: SIZE_T, + DeCommitTotalFreeThreshold: SIZE_T, + MaximumAllocationSize: SIZE_T, + VirtualMemoryThreshold: SIZE_T, + InitialCommit: SIZE_T, + InitialReserve: SIZE_T, + CommitRoutine: *const COMMIT_ROUTINE, + Reserved: [2]SIZE_T = @splat(0), + + pub const COMMIT_ROUTINE = fn ( + Base: PVOID, + CommitAddress: *PVOID, + CommitSize: *SIZE_T, + ) callconv(.winapi) NTSTATUS; + + pub const SEGMENT = extern struct { + Version: VERSION, + Size: USHORT, + Flags: FLG, + MemorySource: MEMORY_SOURCE, + Reserved: [4]SIZE_T, + + pub const VERSION = enum(USHORT) { + CURRENT = 3, + _, + }; + + pub const FLG = packed struct(ULONG) { + USE_PAGE_HEAP: bool = false, + NO_LFH: bool = false, + Reserved2: u30 = 0, + + pub const VALID_FLAGS: FLG = .{ + .USE_PAGE_HEAP = true, + .NO_LFH = true, + }; + }; + + pub const MEMORY_SOURCE = extern struct { + Flags: ULONG, + MemoryTypeMask: TYPE, + NumaNode: ULONG, + u: extern union { + PartitionHandle: HANDLE, + Callbacks: *const VA_CALLBACKS, + }, + Reserved: [2]SIZE_T = @splat(0), + + pub const TYPE = enum(ULONG) { + Paged, + NonPaged, + @"64KPage", + LargePage, + HugePage, + Custom, + _, + + pub const Max: @typeInfo(@This()).@"enum".tag_type = @typeInfo(@This()).@"enum".fields.len; + }; + + pub const VA_CALLBACKS = extern struct { + CallbackContext: HANDLE, + AllocateVirtualMemory: *const ALLOCATE_VIRTUAL_MEMORY_EX_CALLBACK, + FreeVirtualMemory: *const FREE_VIRTUAL_MEMORY_EX_CALLBACK, + QueryVirtualMemory: *const QUERY_VIRTUAL_MEMORY_CALLBACK, + + pub const ALLOCATE_VIRTUAL_MEMORY_EX_CALLBACK = fn ( + CallbackContext: HANDLE, + BaseAddress: *PVOID, + RegionSize: *SIZE_T, + AllocationType: ULONG, + PageProtection: ULONG, + ExtendedParameters: ?[*]MEM.EXTENDED_PARAMETER, + ExtendedParameterCount: ULONG, + ) callconv(.c) NTSTATUS; + + pub const FREE_VIRTUAL_MEMORY_EX_CALLBACK = fn ( + CallbackContext: HANDLE, + ProcessHandle: HANDLE, + BaseAddress: *PVOID, + RegionSize: *SIZE_T, + FreeType: ULONG, + ) callconv(.c) NTSTATUS; + + pub const QUERY_VIRTUAL_MEMORY_CALLBACK = fn ( + CallbackContext: HANDLE, + ProcessHandle: HANDLE, + BaseAddress: *PVOID, + MemoryInformationClass: MEMORY_INFO_CLASS, + MemoryInformation: PVOID, + MemoryInformationLength: SIZE_T, + ReturnLength: ?*SIZE_T, + ) callconv(.c) NTSTATUS; + + pub const MEMORY_INFO_CLASS = enum(c_int) { + Basic, + _, + }; + }; + }; + }; + }; +}; + +pub const CTL_CODE = packed struct(ULONG) { + Method: METHOD, + Function: u12, + Access: FILE_ACCESS, + DeviceType: FILE_DEVICE, + + pub const METHOD = enum(u2) { + BUFFERED = 0, + IN_DIRECT = 1, + OUT_DIRECT = 2, 
+ NEITHER = 3, + }; + + pub const FILE_ACCESS = packed struct(u2) { + READ: bool = false, + WRITE: bool = false, + + pub const ANY: FILE_ACCESS = .{ .READ = false, .WRITE = false }; + pub const SPECIAL = ANY; + }; + + pub const FILE_DEVICE = enum(u16) { + BEEP = 0x00000001, + CD_ROM = 0x00000002, + CD_ROM_FILE_SYSTEM = 0x00000003, + CONTROLLER = 0x00000004, + DATALINK = 0x00000005, + DFS = 0x00000006, + DISK = 0x00000007, + DISK_FILE_SYSTEM = 0x00000008, + FILE_SYSTEM = 0x00000009, + INPORT_PORT = 0x0000000a, + KEYBOARD = 0x0000000b, + MAILSLOT = 0x0000000c, + MIDI_IN = 0x0000000d, + MIDI_OUT = 0x0000000e, + MOUSE = 0x0000000f, + MULTI_UNC_PROVIDER = 0x00000010, + NAMED_PIPE = 0x00000011, + NETWORK = 0x00000012, + NETWORK_BROWSER = 0x00000013, + NETWORK_FILE_SYSTEM = 0x00000014, + NULL = 0x00000015, + PARALLEL_PORT = 0x00000016, + PHYSICAL_NETCARD = 0x00000017, + PRINTER = 0x00000018, + SCANNER = 0x00000019, + SERIAL_MOUSE_PORT = 0x0000001a, + SERIAL_PORT = 0x0000001b, + SCREEN = 0x0000001c, + SOUND = 0x0000001d, + STREAMS = 0x0000001e, + TAPE = 0x0000001f, + TAPE_FILE_SYSTEM = 0x00000020, + TRANSPORT = 0x00000021, + UNKNOWN = 0x00000022, + VIDEO = 0x00000023, + VIRTUAL_DISK = 0x00000024, + WAVE_IN = 0x00000025, + WAVE_OUT = 0x00000026, + @"8042_PORT" = 0x00000027, + NETWORK_REDIRECTOR = 0x00000028, + BATTERY = 0x00000029, + BUS_EXTENDER = 0x0000002a, + MODEM = 0x0000002b, + VDM = 0x0000002c, + MASS_STORAGE = 0x0000002d, + SMB = 0x0000002e, + KS = 0x0000002f, + CHANGER = 0x00000030, + SMARTCARD = 0x00000031, + ACPI = 0x00000032, + DVD = 0x00000033, + FULLSCREEN_VIDEO = 0x00000034, + DFS_FILE_SYSTEM = 0x00000035, + DFS_VOLUME = 0x00000036, + SERENUM = 0x00000037, + TERMSRV = 0x00000038, + KSEC = 0x00000039, + FIPS = 0x0000003A, + INFINIBAND = 0x0000003B, + VMBUS = 0x0000003E, + CRYPT_PROVIDER = 0x0000003F, + WPD = 0x00000040, + BLUETOOTH = 0x00000041, + MT_COMPOSITE = 0x00000042, + MT_TRANSPORT = 0x00000043, + BIOMETRIC = 0x00000044, + PMI = 0x00000045, + EHSTOR = 0x00000046, + DEVAPI = 0x00000047, + GPIO = 0x00000048, + USBEX = 0x00000049, + CONSOLE = 0x00000050, + NFP = 0x00000051, + SYSENV = 0x00000052, + VIRTUAL_BLOCK = 0x00000053, + POINT_OF_SERVICE = 0x00000054, + STORAGE_REPLICATION = 0x00000055, + TRUST_ENV = 0x00000056, + UCM = 0x00000057, + UCMTCPCI = 0x00000058, + PERSISTENT_MEMORY = 0x00000059, + NVDIMM = 0x0000005a, + HOLOGRAPHIC = 0x0000005b, + SDFXHCI = 0x0000005c, + UCMUCSI = 0x0000005d, + PRM = 0x0000005e, + EVENT_COLLECTOR = 0x0000005f, + USB4 = 0x00000060, + SOUNDWIRE = 0x00000061, + + MOUNTMGRCONTROLTYPE = 'm', + + _, + }; +}; + +pub const IOCTL = struct { + pub const MOUNTMGR = struct { + pub const QUERY_POINTS: CTL_CODE = .{ .DeviceType = .MOUNTMGRCONTROLTYPE, .Function = 2, .Method = .BUFFERED, .Access = .ANY }; + pub const QUERY_DOS_VOLUME_PATH: CTL_CODE = .{ .DeviceType = .MOUNTMGRCONTROLTYPE, .Function = 12, .Method = .BUFFERED, .Access = .ANY }; + }; +}; + +pub const FSCTL = struct { + pub const SET_REPARSE_POINT: CTL_CODE = .{ .DeviceType = .FILE_SYSTEM, .Function = 41, .Method = .BUFFERED, .Access = .SPECIAL }; + pub const GET_REPARSE_POINT: CTL_CODE = .{ .DeviceType = .FILE_SYSTEM, .Function = 42, .Method = .BUFFERED, .Access = .ANY }; + + pub const PIPE = struct { + pub const ASSIGN_EVENT: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 0, .Method = .BUFFERED, .Access = .ANY }; + pub const DISCONNECT: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 1, .Method = .BUFFERED, .Access = .ANY }; + pub const LISTEN: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, 
.Function = 2, .Method = .BUFFERED, .Access = .ANY }; + pub const PEEK: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 3, .Method = .BUFFERED, .Access = .{ .READ = true } }; + pub const QUERY_EVENT: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 4, .Method = .BUFFERED, .Access = .ANY }; + pub const TRANSCEIVE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 5, .Method = .NEITHER, .Access = .{ .READ = true, .WRITE = true } }; + pub const WAIT: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 6, .Method = .BUFFERED, .Access = .ANY }; + pub const IMPERSONATE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 7, .Method = .BUFFERED, .Access = .ANY }; + pub const SET_CLIENT_PROCESS: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 8, .Method = .BUFFERED, .Access = .ANY }; + pub const QUERY_CLIENT_PROCESS: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 9, .Method = .BUFFERED, .Access = .ANY }; + pub const GET_PIPE_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 10, .Method = .BUFFERED, .Access = .ANY }; + pub const SET_PIPE_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 11, .Method = .BUFFERED, .Access = .ANY }; + pub const GET_CONNECTION_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 12, .Method = .BUFFERED, .Access = .ANY }; + pub const SET_CONNECTION_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 13, .Method = .BUFFERED, .Access = .ANY }; + pub const GET_HANDLE_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 14, .Method = .BUFFERED, .Access = .ANY }; + pub const SET_HANDLE_ATTRIBUTE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 15, .Method = .BUFFERED, .Access = .ANY }; + pub const FLUSH: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 16, .Method = .BUFFERED, .Access = .{ .WRITE = true } }; + + pub const INTERNAL_READ: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 2045, .Method = .BUFFERED, .Access = .{ .READ = true } }; + pub const INTERNAL_WRITE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 2046, .Method = .BUFFERED, .Access = .{ .WRITE = true } }; + pub const INTERNAL_TRANSCEIVE: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 2047, .Method = .NEITHER, .Access = .{ .READ = true, .WRITE = true } }; + pub const INTERNAL_READ_OVFLOW: CTL_CODE = .{ .DeviceType = .NAMED_PIPE, .Function = 2048, .Method = .BUFFERED, .Access = .{ .READ = true } }; + }; +}; + +pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: ULONG = 16 * 1024; + +pub const IO_REPARSE_TAG = packed struct(ULONG) { + Value: u12, + Index: u4 = 0, + ReservedBits: u12 = 0, + /// Can have children if a directory. + IsDirectory: bool = false, + /// Represents another named entity in the system. + IsSurrogate: bool = false, + /// Must be `false` for non-Microsoft tags. + IsReserved: bool = false, + /// Owned by Microsoft. 
+ IsMicrosoft: bool = false, + + pub const RESERVED_INVALID: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsReserved = true, .Index = 0x8, .Value = 0x000 }; + pub const MOUNT_POINT: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x003 }; + pub const HSM: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsReserved = true, .Value = 0x004 }; + pub const DRIVE_EXTENDER: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x005 }; + pub const HSM2: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x006 }; + pub const SIS: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x007 }; + pub const WIM: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x008 }; + pub const CSV: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x009 }; + pub const DFS: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x00A }; + pub const FILTER_MANAGER: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x00B }; + pub const SYMLINK: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x00C }; + pub const IIS_CACHE: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x010 }; + pub const DFSR: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x012 }; + pub const DEDUP: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x013 }; + pub const APPXSTRM: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsReserved = true, .Value = 0x014 }; + pub const NFS: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x014 }; + pub const FILE_PLACEHOLDER: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x015 }; + pub const DFM: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x016 }; + pub const WOF: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x017 }; + pub inline fn WCI(index: u1) IO_REPARSE_TAG { + return .{ .IsMicrosoft = true, .IsDirectory = index == 0x1, .Index = index, .Value = 0x018 }; + } + pub const GLOBAL_REPARSE: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x0019 }; + pub inline fn CLOUD(index: u4) IO_REPARSE_TAG { + return .{ .IsMicrosoft = true, .IsDirectory = true, .Index = index, .Value = 0x01A }; + } + pub const APPEXECLINK: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x01B }; + pub const PROJFS: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsDirectory = true, .Value = 0x01C }; + pub const LX_SYMLINK: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x01D }; + pub const STORAGE_SYNC: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x01E }; + pub const WCI_TOMBSTONE: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x01F }; + pub const UNHANDLED: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x020 }; + pub const ONEDRIVE: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x021 }; + pub const PROJFS_TOMBSTONE: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x022 }; + pub const AF_UNIX: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x023 }; + pub const LX_FIFO: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x024 }; + pub const LX_CHR: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x025 }; + pub const LX_BLK: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .Value = 0x026 }; + pub const LX_STORAGE_SYNC_FOLDER: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsDirectory = true, .Value = 0x027 }; + pub inline fn WCI_LINK(index: u1) IO_REPARSE_TAG { + return .{ .IsMicrosoft = true, .IsSurrogate = true, .Index = index, .Value = 0x027 }; + } + pub const DATALESS_CIM: IO_REPARSE_TAG = .{ .IsMicrosoft = true, .IsSurrogate = true, .Value = 0x28 }; +}; + +// ref: km/wdm.h + +pub 
const ACCESS_MASK = packed struct(DWORD) { + SPECIFIC: Specific = .{ .bits = 0 }, + STANDARD: Standard = .{}, + Reserved21: u3 = 0, + ACCESS_SYSTEM_SECURITY: bool = false, + MAXIMUM_ALLOWED: bool = false, + Reserved26: u2 = 0, + GENERIC: Generic = .{}, + + pub const Specific = packed union { + bits: u16, + + // ref: km/wdm.h + + /// Define access rights to files and directories + FILE: File, + FILE_DIRECTORY: File.Directory, + FILE_PIPE: File.Pipe, + /// Registry Specific Access Rights. + KEY: Key, + /// Object Manager Object Type Specific Access Rights. + OBJECT_TYPE: ObjectType, + /// Object Manager Directory Specific Access Rights. + DIRECTORY: Directory, + /// Object Manager Symbolic Link Specific Access Rights. + SYMBOLIC_LINK: SymbolicLink, + /// Section Access Rights. + SECTION: Section, + /// Session Specific Access Rights. + SESSION: Session, + /// Process Specific Access Rights. + PROCESS: Process, + /// Thread Specific Access Rights. + THREAD: Thread, + /// Partition Specific Access Rights. + MEMORY_PARTITION: MemoryPartition, + /// Generic mappings for transaction manager rights. + TRANSACTIONMANAGER: TransactionManager, + /// Generic mappings for transaction rights. + TRANSACTION: Transaction, + /// Generic mappings for resource manager rights. + RESOURCEMANAGER: ResourceManager, + /// Generic mappings for enlistment rights. + ENLISTMENT: Enlistment, + /// Event Specific Access Rights. + EVENT: Event, + /// Semaphore Specific Access Rights. + SEMAPHORE: Semaphore, + + // ref: km/ntifs.h + + /// Token Specific Access Rights. + TOKEN: Token, + + // um/winnt.h + + /// Job Object Specific Access Rights. + JOB_OBJECT: JobObject, + /// Mutant Specific Access Rights. + MUTANT: Mutant, + /// Timer Specific Access Rights. + TIMER: Timer, + /// I/O Completion Specific Access Rights. 
+ IO_COMPLETION: IoCompletion, + + pub const File = packed struct(u16) { + READ_DATA: bool = false, + WRITE_DATA: bool = false, + APPEND_DATA: bool = false, + READ_EA: bool = false, + WRITE_EA: bool = false, + EXECUTE: bool = false, + Reserved6: u1 = 0, + READ_ATTRIBUTES: bool = false, + WRITE_ATTRIBUTES: bool = false, + Reserved9: u7 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .FILE = .{ + .READ_DATA = true, + .WRITE_DATA = true, + .APPEND_DATA = true, + .READ_EA = true, + .WRITE_EA = true, + .EXECUTE = true, + .Reserved6 = maxInt(@FieldType(File, "Reserved6")), + .READ_ATTRIBUTES = true, + .WRITE_ATTRIBUTES = true, + } }, + }; + + pub const GENERIC_READ: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .FILE = .{ + .READ_DATA = true, + .READ_ATTRIBUTES = true, + .READ_EA = true, + } }, + }; + + pub const GENERIC_WRITE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .WRITE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .FILE = .{ + .WRITE_DATA = true, + .WRITE_ATTRIBUTES = true, + .WRITE_EA = true, + .APPEND_DATA = true, + } }, + }; + + pub const GENERIC_EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .EXECUTE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .FILE = .{ + .READ_ATTRIBUTES = true, + .EXECUTE = true, + } }, + }; + + pub const Directory = packed struct(u16) { + LIST: bool = false, + ADD_FILE: bool = false, + ADD_SUBDIRECTORY: bool = false, + READ_EA: bool = false, + WRITE_EA: bool = false, + TRAVERSE: bool = false, + DELETE_CHILD: bool = false, + READ_ATTRIBUTES: bool = false, + WRITE_ATTRIBUTES: bool = false, + Reserved9: u7 = 0, + }; + + pub const Pipe = packed struct(u16) { + READ_DATA: bool = false, + WRITE_DATA: bool = false, + CREATE_PIPE_INSTANCE: bool = false, + Reserved3: u4 = 0, + READ_ATTRIBUTES: bool = false, + WRITE_ATTRIBUTES: bool = false, + Reserved9: u7 = 0, + }; + }; + + pub const Key = packed struct(u16) { + /// Required to query the values of a registry key. + QUERY_VALUE: bool = false, + /// Required to create, delete, or set a registry value. + SET_VALUE: bool = false, + /// Required to create a subkey of a registry key. + CREATE_SUB_KEY: bool = false, + /// Required to enumerate the subkeys of a registry key. + ENUMERATE_SUB_KEYS: bool = false, + /// Required to request change notifications for a registry key or for subkeys of a registry key. + NOTIFY: bool = false, + /// Reserved for system use. + CREATE_LINK: bool = false, + Reserved6: u2 = 0, + /// Indicates that an application on 64-bit Windows should operate on the 64-bit registry view. + /// This flag is ignored by 32-bit Windows. + WOW64_64KEY: bool = false, + /// Indicates that an application on 64-bit Windows should operate on the 32-bit registry view. + /// This flag is ignored by 32-bit Windows. + WOW64_32KEY: bool = false, + Reserved10: u6 = 0, + + pub const WOW64_RES: ACCESS_MASK = .{ + .SPECIFIC = .{ .KEY = .{ + .WOW64_32KEY = true, + .WOW64_64KEY = true, + } }, + }; + + /// Combines the STANDARD_RIGHTS_READ, KEY_QUERY_VALUE, KEY_ENUMERATE_SUB_KEYS, and KEY_NOTIFY values. + pub const READ: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = false, + }, + .SPECIFIC = .{ .KEY = .{ + .QUERY_VALUE = true, + .ENUMERATE_SUB_KEYS = true, + .NOTIFY = true, + } }, + }; + + /// Combines the STANDARD_RIGHTS_WRITE, KEY_SET_VALUE, and KEY_CREATE_SUB_KEY access rights. 
+ pub const WRITE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .WRITE, + .SYNCHRONIZE = false, + }, + .SPECIFIC = .{ .KEY = .{ + .SET_VALUE = true, + .CREATE_SUB_KEY = true, + } }, + }; + + /// Equivalent to KEY_READ. + pub const EXECUTE = READ; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .ALL, + .SYNCHRONIZE = false, + }, + .SPECIFIC = .{ .KEY = .{ + .QUERY_VALUE = true, + .SET_VALUE = true, + .CREATE_SUB_KEY = true, + .ENUMERATE_SUB_KEYS = true, + .NOTIFY = true, + .CREATE_LINK = true, + } }, + }; + }; + + pub const ObjectType = packed struct(u16) { + CREATE: bool = false, + Reserved1: u15 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .OBJECT_TYPE = .{ + .CREATE = true, + } }, + }; + }; + + pub const Directory = packed struct(u16) { + QUERY: bool = false, + TRAVERSE: bool = false, + CREATE_OBJECT: bool = false, + CREATE_SUBDIRECTORY: bool = false, + Reserved3: u12 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .DIRECTORY = .{ + .QUERY = true, + .TRAVERSE = true, + .CREATE_OBJECT = true, + .CREATE_SUBDIRECTORY = true, + } }, + }; + }; + + pub const SymbolicLink = packed struct(u16) { + QUERY: bool = false, + SET: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .SYMBOLIC_LINK = .{ + .QUERY = true, + } }, + }; + + pub const ALL_ACCESS_EX: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .SYMBOLIC_LINK = .{ + .QUERY = true, + .SET = true, + .Reserved2 = maxInt(@FieldType(SymbolicLink, "Reserved2")), + } }, + }; + }; + + pub const Section = packed struct(u16) { + QUERY: bool = false, + MAP_WRITE: bool = false, + MAP_READ: bool = false, + MAP_EXECUTE: bool = false, + EXTEND_SIZE: bool = false, + /// not included in `ALL_ACCESS` + MAP_EXECUTE_EXPLICIT: bool = false, + Reserved6: u10 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .SECTION = .{ + .QUERY = true, + .MAP_WRITE = true, + .MAP_READ = true, + .MAP_EXECUTE = true, + .EXTEND_SIZE = true, + } }, + }; + }; + + pub const Session = packed struct(u16) { + QUERY_ACCESS: bool = false, + MODIFY_ACCESS: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .SESSION = .{ + .QUERY_ACCESS = true, + .MODIFY_ACCESS = true, + } }, + }; + }; + + pub const Process = packed struct(u16) { + TERMINATE: bool = false, + CREATE_THREAD: bool = false, + SET_SESSIONID: bool = false, + VM_OPERATION: bool = false, + VM_READ: bool = false, + VM_WRITE: bool = false, + DUP_HANDLE: bool = false, + CREATE_PROCESS: bool = false, + SET_QUOTA: bool = false, + SET_INFORMATION: bool = false, + QUERY_INFORMATION: bool = false, + SUSPEND_RESUME: bool = false, + QUERY_LIMITED_INFORMATION: bool = false, + SET_LIMITED_INFORMATION: bool = false, + Reserved14: u2 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .PROCESS = .{ + .TERMINATE = true, + .CREATE_THREAD = true, + .SET_SESSIONID = true, + .VM_OPERATION = true, + .VM_READ = true, + .VM_WRITE = true, + .DUP_HANDLE = true, + .CREATE_PROCESS = true, + .SET_QUOTA = true, + .SET_INFORMATION = true, + .QUERY_INFORMATION = true, + .SUSPEND_RESUME = true, + .QUERY_LIMITED_INFORMATION = true, + .SET_LIMITED_INFORMATION = true, + .Reserved14 = 
maxInt(@FieldType(Process, "Reserved14")), + } }, + }; + }; + + pub const Thread = packed struct(u16) { + TERMINATE: bool = false, + SUSPEND_RESUME: bool = false, + ALERT: bool = false, + GET_CONTEXT: bool = false, + SET_CONTEXT: bool = false, + SET_INFORMATION: bool = false, + QUERY_INFORMATION: bool = false, + SET_THREAD_TOKEN: bool = false, + IMPERSONATE: bool = false, + DIRECT_IMPERSONATION: bool = false, + SET_LIMITED_INFORMATION: bool = false, + QUERY_LIMITED_INFORMATION: bool = false, + RESUME: bool = false, + Reserved13: u3 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .THREAD = .{ + .TERMINATE = true, + .SUSPEND_RESUME = true, + .ALERT = true, + .GET_CONTEXT = true, + .SET_CONTEXT = true, + .SET_INFORMATION = true, + .QUERY_INFORMATION = true, + .SET_THREAD_TOKEN = true, + .IMPERSONATE = true, + .DIRECT_IMPERSONATION = true, + .SET_LIMITED_INFORMATION = true, + .QUERY_LIMITED_INFORMATION = true, + .RESUME = true, + .Reserved13 = maxInt(@FieldType(Thread, "Reserved13")), + } }, + }; + }; + + pub const MemoryPartition = packed struct(u16) { + QUERY_ACCESS: bool = false, + MODIFY_ACCESS: bool = false, + Required2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .MEMORY_PARTITION = .{ + .QUERY_ACCESS = true, + .MODIFY_ACCESS = true, + } }, + }; + }; + + pub const TransactionManager = packed struct(u16) { + QUERY_INFORMATION: bool = false, + SET_INFORMATION: bool = false, + RECOVER: bool = false, + RENAME: bool = false, + CREATE_RM: bool = false, + /// The following right is intended for DTC's use only; it will be deprecated, and no one else should take a dependency on it. 
+ BIND_TRANSACTION: bool = false, + Reserved6: u10 = 0, + + pub const GENERIC_READ: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .READ }, + .SPECIFIC = .{ .TRANSACTIONMANAGER = .{ + .QUERY_INFORMATION = true, + } }, + }; + + pub const GENERIC_WRITE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .WRITE }, + .SPECIFIC = .{ .TRANSACTIONMANAGER = .{ + .SET_INFORMATION = true, + .RECOVER = true, + .RENAME = true, + .CREATE_RM = true, + } }, + }; + + pub const GENERIC_EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .EXECUTE }, + .SPECIFIC = .{ .TRANSACTIONMANAGER = .{} }, + }; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .TRANSACTIONMANAGER = .{ + .QUERY_INFORMATION = true, + .SET_INFORMATION = true, + .RECOVER = true, + .RENAME = true, + .CREATE_RM = true, + .BIND_TRANSACTION = true, + } }, + }; + }; + + pub const Transaction = packed struct(u16) { + QUERY_INFORMATION: bool = false, + SET_INFORMATION: bool = false, + ENLIST: bool = false, + COMMIT: bool = false, + ROLLBACK: bool = false, + PROPAGATE: bool = false, + RIGHT_RESERVED1: bool = false, + Reserved7: u9 = 0, + + pub const GENERIC_READ: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TRANSACTION = .{ + .QUERY_INFORMATION = true, + } }, + }; + + pub const GENERIC_WRITE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .WRITE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TRANSACTION = .{ + .SET_INFORMATION = true, + .COMMIT = true, + .ENLIST = true, + .ROLLBACK = true, + .PROPAGATE = true, + } }, + }; + + pub const GENERIC_EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .EXECUTE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TRANSACTION = .{ + .COMMIT = true, + .ROLLBACK = true, + } }, + }; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TRANSACTION = .{ + .QUERY_INFORMATION = true, + .SET_INFORMATION = true, + .COMMIT = true, + .ENLIST = true, + .ROLLBACK = true, + .PROPAGATE = true, + } }, + }; + + pub const RESOURCE_MANAGER_RIGHTS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .{ + .READ_CONTROL = true, + }, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TRANSACTION = .{ + .QUERY_INFORMATION = true, + .SET_INFORMATION = true, + .ENLIST = true, + .ROLLBACK = true, + .PROPAGATE = true, + } }, + }; + }; + + pub const ResourceManager = packed struct(u16) { + QUERY_INFORMATION: bool = false, + SET_INFORMATION: bool = false, + RECOVER: bool = false, + ENLIST: bool = false, + GET_NOTIFICATION: bool = false, + REGISTER_PROTOCOL: bool = false, + COMPLETE_PROPAGATION: bool = false, + Reserved7: u9 = 0, + + pub const GENERIC_READ: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .READ, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .RESOURCEMANAGER = .{ + .QUERY_INFORMATION = true, + } }, + }; + + pub const GENERIC_WRITE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .WRITE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .RESOURCEMANAGER = .{ + .SET_INFORMATION = true, + .RECOVER = true, + .ENLIST = true, + .GET_NOTIFICATION = true, + .REGISTER_PROTOCOL = true, + .COMPLETE_PROPAGATION = true, + } }, + }; + + pub const GENERIC_EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .EXECUTE, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .RESOURCEMANAGER = .{ + .RECOVER = true, + .ENLIST = true, + .GET_NOTIFICATION = true, + .COMPLETE_PROPAGATION = true, + } }, + }; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = 
true, + }, + .SPECIFIC = .{ .RESOURCEMANAGER = .{ + .QUERY_INFORMATION = true, + .SET_INFORMATION = true, + .RECOVER = true, + .ENLIST = true, + .GET_NOTIFICATION = true, + .REGISTER_PROTOCOL = true, + .COMPLETE_PROPAGATION = true, + } }, + }; + }; + + pub const Enlistment = packed struct(u16) { + QUERY_INFORMATION: bool = false, + SET_INFORMATION: bool = false, + RECOVER: bool = false, + SUBORDINATE_RIGHTS: bool = false, + SUPERIOR_RIGHTS: bool = false, + Reserved5: u11 = 0, + + pub const GENERIC_READ: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .READ }, + .SPECIFIC = .{ .ENLISTMENT = .{ + .QUERY_INFORMATION = true, + } }, + }; + + pub const GENERIC_WRITE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .WRITE }, + .SPECIFIC = .{ .ENLISTMENT = .{ + .SET_INFORMATION = true, + .RECOVER = true, + .SUBORDINATE_RIGHTS = true, + .SUPERIOR_RIGHTS = true, + } }, + }; + + pub const GENERIC_EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .EXECUTE }, + .SPECIFIC = .{ .ENLISTMENT = .{ + .RECOVER = true, + .SUBORDINATE_RIGHTS = true, + .SUPERIOR_RIGHTS = true, + } }, + }; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .ENLISTMENT = .{ + .QUERY_INFORMATION = true, + .SET_INFORMATION = true, + .RECOVER = true, + .SUBORDINATE_RIGHTS = true, + .SUPERIOR_RIGHTS = true, + } }, + }; + }; + + pub const Event = packed struct(u16) { + QUERY_STATE: bool = false, + MODIFY_STATE: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .EVENT = .{ + .QUERY_STATE = true, + .MODIFY_STATE = true, + } }, + }; + }; + + pub const Semaphore = packed struct(u16) { + QUERY_STATE: bool = false, + MODIFY_STATE: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .SEMAPHORE = .{ + .QUERY_STATE = true, + .MODIFY_STATE = true, + } }, + }; + }; + + pub const Token = packed struct(u16) { + ASSIGN_PRIMARY: bool = false, + DUPLICATE: bool = false, + IMPERSONATE: bool = false, + QUERY: bool = false, + QUERY_SOURCE: bool = false, + ADJUST_PRIVILEGES: bool = false, + ADJUST_GROUPS: bool = false, + ADJUST_DEFAULT: bool = false, + ADJUST_SESSIONID: bool = false, + Reserved9: u7 = 0, + + pub const ALL_ACCESS_P: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .TOKEN = .{ + .ASSIGN_PRIMARY = true, + .DUPLICATE = true, + .IMPERSONATE = true, + .QUERY = true, + .QUERY_SOURCE = true, + .ADJUST_PRIVILEGES = true, + .ADJUST_GROUPS = true, + .ADJUST_DEFAULT = true, + } }, + }; + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED }, + .SPECIFIC = .{ .TOKEN = .{ + .ASSIGN_PRIMARY = true, + .DUPLICATE = true, + .IMPERSONATE = true, + .QUERY = true, + .QUERY_SOURCE = true, + .ADJUST_PRIVILEGES = true, + .ADJUST_GROUPS = true, + .ADJUST_DEFAULT = true, + .ADJUST_SESSIONID = true, + } }, + }; + + pub const READ: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .READ }, + .SPECIFIC = .{ .TOKEN = .{ + .QUERY = true, + } }, + }; + + pub const WRITE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .WRITE }, + .SPECIFIC = .{ .TOKEN = .{ + .ADJUST_PRIVILEGES = true, + .ADJUST_GROUPS = true, + .ADJUST_DEFAULT = true, + } }, + }; + + pub const EXECUTE: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .EXECUTE }, + .SPECIFIC = .{ .TOKEN = .{} }, + }; + + pub const TRUST_CONSTRAINT_MASK: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .READ }, + .SPECIFIC = 
.{ .TOKEN = .{ + .QUERY = true, + .QUERY_SOURCE = true, + } }, + }; + + pub const TRUST_ALLOWED_MASK: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .READ }, + .SPECIFIC = .{ .TOKEN = .{ + .QUERY = true, + .QUERY_SOURCE = true, + .DUPLICATE = true, + .IMPERSONATE = true, + } }, + }; + }; + + pub const JobObject = packed struct(u16) { + ASSIGN_PROCESS: bool = false, + SET_ATTRIBUTES: bool = false, + QUERY: bool = false, + TERMINATE: bool = false, + SET_SECURITY_ATTRIBUTES: bool = false, + IMPERSONATE: bool = false, + Reserved6: u10 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .JOB_OBJECT = .{ + .ASSIGN_PROCESS = true, + .SET_ATTRIBUTES = true, + .QUERY = true, + .TERMINATE = true, + .SET_SECURITY_ATTRIBUTES = true, + .IMPERSONATE = true, + } }, + }; + }; + + pub const Mutant = packed struct(u16) { + QUERY_STATE: bool = false, + Reserved1: u15 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .MUTANT = .{ + .QUERY_STATE = true, + } }, + }; + }; + + pub const Timer = packed struct(u16) { + QUERY_STATE: bool = false, + MODIFY_STATE: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ + .RIGHTS = .REQUIRED, + .SYNCHRONIZE = true, + }, + .SPECIFIC = .{ .TIMER = .{ + .QUERY_STATE = true, + .MODIFY_STATE = true, + } }, + }; + }; + + pub const IoCompletion = packed struct(u16) { + Reserved0: u1 = 0, + MODIFY_STATE: bool = false, + Reserved2: u14 = 0, + + pub const ALL_ACCESS: ACCESS_MASK = .{ + .STANDARD = .{ .RIGHTS = .REQUIRED, .SYNCHRONIZE = true }, + .SPECIFIC = .{ .IO_COMPLETION = .{ + .Reserved0 = maxInt(@FieldType(IoCompletion, "Reserved0")), + .MODIFY_STATE = true, + } }, + }; + }; + + pub const RIGHTS_ALL: Specific = .{ .bits = maxInt(@FieldType(Specific, "bits")) }; + }; + + pub const Standard = packed struct(u5) { + RIGHTS: Rights = .{}, + SYNCHRONIZE: bool = false, + + pub const RIGHTS_ALL: Standard = .{ + .RIGHTS = .ALL, + .SYNCHRONIZE = true, + }; + + pub const Rights = packed struct(u4) { + DELETE: bool = false, + READ_CONTROL: bool = false, + WRITE_DAC: bool = false, + WRITE_OWNER: bool = false, + + pub const REQUIRED: Rights = .{ + .DELETE = true, + .READ_CONTROL = true, + .WRITE_DAC = true, + .WRITE_OWNER = true, + }; + + pub const READ: Rights = .{ + .READ_CONTROL = true, + }; + pub const WRITE: Rights = .{ + .READ_CONTROL = true, + }; + pub const EXECUTE: Rights = .{ + .READ_CONTROL = true, + }; + + pub const ALL = REQUIRED; + }; + }; + + pub const Generic = packed struct(u4) { + ALL: bool = false, + EXECUTE: bool = false, + WRITE: bool = false, + READ: bool = false, + }; +}; + +pub const DEVICE_TYPE = packed struct(ULONG) { + FileDevice: CTL_CODE.FILE_DEVICE, + Reserved16: u16 = 0, +}; + +pub const FS_INFORMATION_CLASS = enum(c_int) { + Volume = 1, + Label = 2, + Size = 3, + Device = 4, + Attribute = 5, + Control = 6, + FullSize = 7, + ObjectId = 8, + DriverPath = 9, + VolumeFlags = 10, + SectorSize = 11, + DataCopy = 12, + MetadataSize = 13, + FullSizeEx = 14, + Guid = 15, + _, + + pub const Maximum: @typeInfo(@This()).@"enum".tag_type = 1 + @typeInfo(@This()).@"enum".fields.len; +}; + +pub const SECTION_INHERIT = enum(c_int) { + Share = 1, + Unmap = 2, +}; + +pub const PAGE = packed struct(ULONG) { + NOACCESS: bool = false, + READONLY: bool = false, + READWRITE: bool = false, + WRITECOPY: bool = false, + + EXECUTE: bool = false, + EXECUTE_READ: bool = false, + 
EXECUTE_READWRITE: bool = false, + EXECUTE_WRITECOPY: bool = false, + + GUARD: bool = false, + NOCACHE: bool = false, + WRITECOMBINE: bool = false, + + GRAPHICS_NOACCESS: bool = false, + GRAPHICS_READONLY: bool = false, + GRAPHICS_READWRITE: bool = false, + GRAPHICS_EXECUTE: bool = false, + GRAPHICS_EXECUTE_READ: bool = false, + GRAPHICS_EXECUTE_READWRITE: bool = false, + GRAPHICS_COHERENT: bool = false, + GRAPHICS_NOCACHE: bool = false, + + Reserved19: u12 = 0, + + REVERT_TO_FILE_MAP: bool = false, +}; + +pub const MEM = struct { + pub const ALLOCATE = packed struct(ULONG) { + Reserved0: u12 = 0, + COMMIT: bool = false, + RESERVE: bool = false, + REPLACE_PLACEHOLDER: bool = false, + Reserved15: u3 = 0, + RESERVE_PLACEHOLDER: bool = false, + RESET: bool = false, + TOP_DOWN: bool = false, + WRITE_WATCH: bool = false, + PHYSICAL: bool = false, + Reserved23: u1 = 0, + RESET_UNDO: bool = false, + Reserved25: u4 = 0, + LARGE_PAGES: bool = false, + Reserved30: u1 = 0, + @"4MB_PAGES": bool = false, + + pub const @"64K_PAGES": ALLOCATE = .{ + .LARGE_PAGES = true, + .PHYSICAL = true, + }; + }; + + pub const FREE = packed struct(ULONG) { + COALESCE_PLACEHOLDERS: bool = false, + PRESERVE_PLACEHOLDER: bool = false, + Reserved2: u12 = 0, + DECOMMIT: bool = false, + RELEASE: bool = false, + FREE: bool = false, + Reserved17: u15 = 0, + }; + + pub const MAP = packed struct(ULONG) { + Reserved0: u13 = 0, + RESERVE: bool = false, + REPLACE_PLACEHOLDER: bool = false, + Reserved15: u14 = 0, + LARGE_PAGES: bool = false, + Reserved30: u2 = 0, + }; + + pub const UNMAP = packed struct(ULONG) { + WITH_TRANSIENT_BOOST: bool = false, + PRESERVE_PLACEHOLDER: bool = false, + Reserved2: u30 = 0, + }; + + pub const EXTENDED_PARAMETER = extern struct { + s: packed struct(ULONG64) { + Type: TYPE, + Reserved: u56, + }, + u: extern union { + ULong64: ULONG64, + Pointer: PVOID, + Size: SIZE_T, + Handle: HANDLE, + ULong: ULONG, + }, + + pub const TYPE = enum(u8) { + InvalidType = 0, + AddressRequirements, + NumaNode, + PartitionHandle, + UserPhysicalHandle, + AttributeFlags, + ImageMachine, + _, + + pub const Max: @typeInfo(@This()).@"enum".tag_type = @typeInfo(@This()).@"enum".fields.len; + }; + }; +}; + +pub const SEC = packed struct(ULONG) { + Reserved0: u17 = 0, + HUGE_PAGES: bool = false, + PARTITION_OWNER_HANDLE: bool = false, + @"64K_PAGES": bool = false, + Reserved19: u3 = 0, + FILE: bool = false, + IMAGE: bool = false, + PROTECTED_IMAGE: bool = false, + RESERVE: bool = false, + COMMIT: bool = false, + NOCACHE: bool = false, + Reserved29: u1 = 0, + WRITECOMBINE: bool = false, + LARGE_PAGES: bool = false, + + pub const IMAGE_NO_EXECUTE: SEC = .{ + .IMAGE = true, + .NOCACHE = true, + }; +}; + +pub const ERESOURCE = opaque {}; + +// ref: shared/ntdef.h + +pub const EVENT_TYPE = enum(c_int) { + Notification, + Synchronization, +}; + +pub const TIMER_TYPE = enum(c_int) { + Notification, + Synchronization, +}; + +pub const WAIT_TYPE = enum(c_int) { + All, + Any, +}; + +pub const LOGICAL = ULONG; + +pub const NTSTATUS = @import("windows/ntstatus.zig").NTSTATUS; + +// ref: um/heapapi.h + +pub fn GetProcessHeap() ?*HEAP { + return peb().ProcessHeap; +} + +// ref: um/winternl.h + +pub const OBJECT_ATTRIBUTES = extern struct { + Length: ULONG, + RootDirectory: ?HANDLE, + ObjectName: *UNICODE_STRING, + Attributes: ATTRIBUTES, + SecurityDescriptor: ?*anyopaque, + SecurityQualityOfService: ?*anyopaque, + + // Valid values for the Attributes field + pub const ATTRIBUTES = packed struct(ULONG) { + Reserved0: u1 = 0, + INHERIT: bool = 
false, + Reserved2: u2 = 0, + PERMANENT: bool = false, + EXCLUSIVE: bool = false, + /// If name-lookup code should ignore the case of the ObjectName member rather than performing an exact-match search. + CASE_INSENSITIVE: bool = true, + OPENIF: bool = false, + OPENLINK: bool = false, + KERNEL_HANDLE: bool = false, + FORCE_ACCESS_CHECK: bool = false, + IGNORE_IMPERSONATED_DEVICEMAP: bool = false, + DONT_REPARSE: bool = false, + Reserved13: u19 = 0, + + pub const VALID_ATTRIBUTES: ATTRIBUTES = .{ + .INHERIT = true, + .PERMANENT = true, + .EXCLUSIVE = true, + .CASE_INSENSITIVE = true, + .OPENIF = true, + .OPENLINK = true, + .KERNEL_HANDLE = true, + .FORCE_ACCESS_CHECK = true, + .IGNORE_IMPERSONATED_DEVICEMAP = true, + .DONT_REPARSE = true, + }; + }; +}; + +// ref none pub const OpenError = error{ IsDir, @@ -52,8 +2308,8 @@ pub const OpenFileOptions = struct { access_mask: ACCESS_MASK, dir: ?HANDLE = null, sa: ?*SECURITY_ATTRIBUTES = null, - share_access: ULONG = FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE, - creation: ULONG, + share_access: FILE.SHARE = .VALID_FLAGS, + creation: FILE.CREATE_DISPOSITION, /// If true, tries to open path as a directory. /// Defaults to false. filter: Filter = .file_only, @@ -82,32 +2338,22 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN var result: HANDLE = undefined; const path_len_bytes = math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong; - var nt_name = UNICODE_STRING{ + var nt_name: UNICODE_STRING = .{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, .Buffer = @constCast(sub_path_w.ptr), }; - var attr = OBJECT_ATTRIBUTES{ + const attr: OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(OBJECT_ATTRIBUTES), .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else options.dir, - .Attributes = if (options.sa) |ptr| blk: { // Note we do not use OBJ_CASE_INSENSITIVE here. - const inherit: ULONG = if (ptr.bInheritHandle == TRUE) OBJ_INHERIT else 0; - break :blk inherit; - } else 0, + .Attributes = .{ + .INHERIT = if (options.sa) |sa| sa.bInheritHandle != FALSE else false, + }, .ObjectName = &nt_name, .SecurityDescriptor = if (options.sa) |ptr| ptr.lpSecurityDescriptor else null, .SecurityQualityOfService = null, }; var io: IO_STATUS_BLOCK = undefined; - const blocking_flag: ULONG = FILE_SYNCHRONOUS_IO_NONALERT; - const file_or_dir_flag: ULONG = switch (options.filter) { - .file_only => FILE_NON_DIRECTORY_FILE, - .dir_only => FILE_DIRECTORY_FILE, - .any => 0, - }; - // If we're not following symlinks, we need to ensure we don't pass in any synchronization flags such as FILE_SYNCHRONOUS_IO_NONALERT. 
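
// A rough usage sketch (hedged): with the access mask, `share_access`, and `creation` now
// strongly typed, a caller that previously passed `GENERIC_READ | SYNCHRONIZE` together with
// `FILE_OPEN` might write roughly the following, where `windows` stands for this namespace and
// `path_w` for an NT-style WTF-16 path (both names assumed for illustration only):

const handle = try windows.OpenFile(path_w, .{
    .access_mask = .{
        .STANDARD = .{ .SYNCHRONIZE = true },
        .GENERIC = .{ .READ = true },
    },
    // `share_access` keeps its `.VALID_FLAGS` default, matching the old full-sharing mask.
    .creation = .OPEN,
});
defer windows.CloseHandle(handle);
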
- const flags: ULONG = if (options.follow_symlinks) file_or_dir_flag | blocking_flag else file_or_dir_flag | FILE_OPEN_REPARSE_POINT; - while (true) { const rc = ntdll.NtCreateFile( &result, @@ -115,10 +2361,15 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN &attr, &io, null, - FILE_ATTRIBUTE_NORMAL, + .{ .NORMAL = true }, options.share_access, options.creation, - flags, + .{ + .DIRECTORY_FILE = options.filter == .dir_only, + .NON_DIRECTORY_FILE = options.filter == .file_only, + .IO = if (options.follow_symlinks) .SYNCHRONOUS_NONALERT else .ASYNCHRONOUS, + .OPEN_REPARSE_POINT = !options.follow_symlinks, + }, null, 0, ); @@ -201,16 +2452,16 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C const dev_handle = opt_dev_handle orelse blk: { const str = std.unicode.utf8ToUtf16LeStringLiteral("\\Device\\NamedPipe\\"); const len: u16 = @truncate(str.len * @sizeOf(u16)); - const name = UNICODE_STRING{ + const name: UNICODE_STRING = .{ .Length = len, .MaximumLength = len, .Buffer = @ptrCast(@constCast(str)), }; - const attrs = OBJECT_ATTRIBUTES{ + const attrs: OBJECT_ATTRIBUTES = .{ .ObjectName = @constCast(&name), .Length = @sizeOf(OBJECT_ATTRIBUTES), .RootDirectory = null, - .Attributes = 0, + .Attributes = .{}, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; @@ -219,14 +2470,17 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C var handle: HANDLE = undefined; switch (ntdll.NtCreateFile( &handle, - GENERIC_READ | SYNCHRONIZE, + .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .READ = true }, + }, @constCast(&attrs), &iosb, null, - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - FILE_OPEN, - FILE_SYNCHRONOUS_IO_NONALERT, + .{}, + .VALID_FLAGS, + .OPEN, + .{ .IO = .SYNCHRONOUS_NONALERT }, null, 0, )) { @@ -242,16 +2496,15 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C } else break :blk handle; }; - const name = UNICODE_STRING{ .Buffer = null, .Length = 0, .MaximumLength = 0 }; - var attrs = OBJECT_ATTRIBUTES{ + const name: UNICODE_STRING = .{ .Buffer = null, .Length = 0, .MaximumLength = 0 }; + var attrs: OBJECT_ATTRIBUTES = .{ .ObjectName = @constCast(&name), .Length = @sizeOf(OBJECT_ATTRIBUTES), .RootDirectory = dev_handle, - .Attributes = OBJ_CASE_INSENSITIVE, + .Attributes = .{ .INHERIT = sattr.bInheritHandle != FALSE }, .SecurityDescriptor = sattr.lpSecurityDescriptor, .SecurityQualityOfService = null, }; - if (sattr.bInheritHandle != 0) attrs.Attributes |= OBJ_INHERIT; // 120 second relative timeout in 100ns units. 
const default_timeout: LARGE_INTEGER = (-120 * std.time.ns_per_s) / 100; @@ -259,15 +2512,21 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C var read: HANDLE = undefined; switch (ntdll.NtCreateNamedPipeFile( &read, - GENERIC_READ | FILE_WRITE_ATTRIBUTES | SYNCHRONIZE, + .{ + .SPECIFIC = .{ .FILE_PIPE = .{ + .WRITE_ATTRIBUTES = true, + } }, + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .READ = true }, + }, &attrs, &iosb, - FILE_SHARE_READ | FILE_SHARE_WRITE, - FILE_CREATE, - FILE_SYNCHRONOUS_IO_NONALERT, - FILE_PIPE_BYTE_STREAM_TYPE, - FILE_PIPE_BYTE_STREAM_MODE, - FILE_PIPE_QUEUE_OPERATION, + .{ .READ = true, .WRITE = true }, + .CREATE, + .{ .IO = .SYNCHRONOUS_NONALERT }, + .{ .TYPE = .BYTE_STREAM }, + .{ .MODE = .BYTE_STREAM }, + .{ .OPERATION = .QUEUE }, 1, 4096, 4096, @@ -285,14 +2544,23 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C var write: HANDLE = undefined; switch (ntdll.NtCreateFile( &write, - GENERIC_WRITE | SYNCHRONIZE | FILE_READ_ATTRIBUTES, + .{ + .SPECIFIC = .{ .FILE_PIPE = .{ + .READ_ATTRIBUTES = true, + } }, + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .WRITE = true }, + }, &attrs, &iosb, null, - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - FILE_OPEN, - FILE_SYNCHRONOUS_IO_NONALERT | FILE_NON_DIRECTORY_FILE, + .{}, + .VALID_FLAGS, + .OPEN, + .{ + .IO = .SYNCHRONOUS_NONALERT, + .NON_DIRECTORY_FILE = true, + }, null, 0, )) { @@ -306,71 +2574,49 @@ pub fn CreatePipe(rd: *HANDLE, wr: *HANDLE, sattr: *const SECURITY_ATTRIBUTES) C wr.* = write; } -pub const DeviceIoControlError = error{ - AccessDenied, - /// The volume does not contain a recognized file system. File system - /// drivers might not be loaded, or the volume may be corrupt. - UnrecognizedVolume, - Unexpected, -}; - /// A Zig wrapper around `NtDeviceIoControlFile` and `NtFsControlFile` syscalls. /// It implements similar behavior to `DeviceIoControl` and is meant to serve /// as a direct substitute for that call. /// TODO work out if we need to expose other arguments to the underlying syscalls. 
pub fn DeviceIoControl( - h: HANDLE, - ioControlCode: ULONG, - in: ?[]const u8, - out: ?[]u8, -) DeviceIoControlError!void { - // Logic from: https://doxygen.reactos.org/d3/d74/deviceio_8c.html - const is_fsctl = (ioControlCode >> 16) == FILE_DEVICE_FILE_SYSTEM; - - var io: IO_STATUS_BLOCK = undefined; - const in_ptr = if (in) |i| i.ptr else null; - const in_len = if (in) |i| @as(ULONG, @intCast(i.len)) else 0; - const out_ptr = if (out) |o| o.ptr else null; - const out_len = if (out) |o| @as(ULONG, @intCast(o.len)) else 0; - - const rc = blk: { - if (is_fsctl) { - break :blk ntdll.NtFsControlFile( - h, - null, - null, - null, - &io, - ioControlCode, - in_ptr, - in_len, - out_ptr, - out_len, - ); - } else { - break :blk ntdll.NtDeviceIoControlFile( - h, - null, - null, - null, - &io, - ioControlCode, - in_ptr, - in_len, - out_ptr, - out_len, - ); - } + device: HANDLE, + io_control_code: CTL_CODE, + opts: struct { + event: ?HANDLE = null, + apc_routine: ?*const IO_APC_ROUTINE = null, + apc_context: ?*anyopaque = null, + io_status_block: ?*IO_STATUS_BLOCK = null, + in: []const u8 = &.{}, + out: []u8 = &.{}, + }, +) NTSTATUS { + var io_status_block: IO_STATUS_BLOCK = undefined; + return switch (io_control_code.DeviceType) { + .FILE_SYSTEM, .NAMED_PIPE => ntdll.NtFsControlFile( + device, + opts.event, + opts.apc_routine, + opts.apc_context, + opts.io_status_block orelse &io_status_block, + io_control_code, + if (opts.in.len > 0) opts.in.ptr else null, + @intCast(opts.in.len), + if (opts.out.len > 0) opts.out.ptr else null, + @intCast(opts.out.len), + ), + else => ntdll.NtDeviceIoControlFile( + device, + opts.event, + opts.apc_routine, + opts.apc_context, + opts.io_status_block orelse &io_status_block, + io_control_code, + if (opts.in.len > 0) opts.in.ptr else null, + @intCast(opts.in.len), + if (opts.out.len > 0) opts.out.ptr else null, + @intCast(opts.out.len), + ), }; - switch (rc) { - .SUCCESS => {}, - .PRIVILEGE_NOT_HELD => return error.AccessDenied, - .ACCESS_DENIED => return error.AccessDenied, - .INVALID_DEVICE_REQUEST => return error.AccessDenied, // Not supported by the underlying filesystem - .INVALID_PARAMETER => unreachable, - .UNRECOGNIZED_VOLUME => return error.UnrecognizedVolume, - else => return unexpectedStatus(rc), - } } pub fn GetOverlappedResult(h: HANDLE, overlapped: *OVERLAPPED, wait: bool) !DWORD { @@ -704,7 +2950,7 @@ pub const SetCurrentDirectoryError = error{ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void { const path_len_bytes = math.cast(u16, path_name.len * 2) orelse return error.NameTooLong; - var nt_name = UNICODE_STRING{ + var nt_name: UNICODE_STRING = .{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, .Buffer = @constCast(path_name.ptr), @@ -761,9 +3007,6 @@ pub const CreateSymbolicLinkError = error{ NoDevice, NetworkNotFound, BadPathName, - /// The volume does not contain a recognized file system. File system - /// drivers might not be loaded, or the volume may be corrupt. 
- UnrecognizedVolume, Unexpected, }; @@ -780,7 +3023,7 @@ pub fn CreateSymbolicLink( is_directory: bool, ) CreateSymbolicLinkError!void { const SYMLINK_DATA = extern struct { - ReparseTag: ULONG, + ReparseTag: IO_REPARSE_TAG, ReparseDataLength: USHORT, Reserved: USHORT, SubstituteNameOffset: USHORT, @@ -791,9 +3034,12 @@ pub fn CreateSymbolicLink( }; const symlink_handle = OpenFile(sym_link_path, .{ - .access_mask = SYNCHRONIZE | GENERIC_READ | GENERIC_WRITE, + .access_mask = .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .WRITE = true, .READ = true }, + }, .dir = dir, - .creation = FILE_CREATE, + .creation = .CREATE, .filter = if (is_directory) .dir_only else .file_only, }) catch |err| switch (err) { error.IsDir => return error.PathAlreadyExists, @@ -845,8 +3091,8 @@ pub fn CreateSymbolicLink( const buf_len = @sizeOf(SYMLINK_DATA) + final_target_path.len * 4; const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2; const target_is_absolute = std.fs.path.isAbsoluteWindowsWtf16(final_target_path); - const symlink_data = SYMLINK_DATA{ - .ReparseTag = IO_REPARSE_TAG_SYMLINK, + const symlink_data: SYMLINK_DATA = .{ + .ReparseTag = .SYMLINK, .ReparseDataLength = @intCast(buf_len - header_len), .Reserved = 0, .SubstituteNameOffset = @intCast(final_target_path.len * 2), @@ -860,7 +3106,14 @@ pub fn CreateSymbolicLink( @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. final_target_path.len * 2], @as([*]const u8, @ptrCast(final_target_path))); const paths_start = @sizeOf(SYMLINK_DATA) + final_target_path.len * 2; @memcpy(buffer[paths_start..][0 .. final_target_path.len * 2], @as([*]const u8, @ptrCast(final_target_path))); - _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null); + const rc = DeviceIoControl(symlink_handle, FSCTL.SET_REPARSE_POINT, .{ .in = buffer[0..buf_len] }); + switch (rc) { + .SUCCESS => {}, + .PRIVILEGE_NOT_HELD => return error.AccessDenied, + .ACCESS_DENIED => return error.AccessDenied, + .INVALID_DEVICE_REQUEST => return error.AccessDenied, // Not supported by the underlying filesystem + else => return unexpectedStatus(rc), + } } pub const ReadLinkError = error{ @@ -872,15 +3125,21 @@ pub const ReadLinkError = error{ BadPathName, AntivirusInterference, UnsupportedReparsePointType, + NotLink, }; /// `sub_path_w` will never be accessed after `out_buffer` has been written to, so it /// is safe to reuse a single buffer for both. pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u16) ReadLinkError![]u16 { const result_handle = OpenFile(sub_path_w, .{ - .access_mask = FILE_READ_ATTRIBUTES | SYNCHRONIZE, + .access_mask = .{ + .SPECIFIC = .{ .FILE = .{ + .READ_ATTRIBUTES = true, + } }, + .STANDARD = .{ .SYNCHRONIZE = true }, + }, .dir = dir, - .creation = FILE_OPEN, + .creation = .OPEN, .follow_symlinks = false, .filter = .any, }) catch |err| switch (err) { @@ -894,15 +3153,17 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u16) ReadLi defer CloseHandle(result_handle); var reparse_buf: [MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 align(@alignOf(REPARSE_DATA_BUFFER)) = undefined; - _ = DeviceIoControl(result_handle, FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]) catch |err| switch (err) { - error.AccessDenied => return error.Unexpected, - error.UnrecognizedVolume => return error.Unexpected, - else => |e| return e, - }; + const rc = DeviceIoControl(result_handle, FSCTL.GET_REPARSE_POINT, .{ .out = reparse_buf[0..] 
}); + switch (rc) { + .SUCCESS => {}, + .NOT_A_REPARSE_POINT => return error.NotLink, + else => return unexpectedStatus(rc), + } const reparse_struct: *const REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0])); - switch (reparse_struct.ReparseTag) { - IO_REPARSE_TAG_SYMLINK => { + const IoReparseTagInt = @typeInfo(IO_REPARSE_TAG).@"struct".backing_integer.?; + switch (@as(IoReparseTagInt, @bitCast(reparse_struct.ReparseTag))) { + @as(IoReparseTagInt, @bitCast(IO_REPARSE_TAG.SYMLINK)) => { const buf: *const SYMBOLIC_LINK_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; @@ -910,16 +3171,14 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u16) ReadLi const is_relative = buf.Flags & SYMLINK_FLAG_RELATIVE != 0; return parseReadLinkPath(path_buf[offset..][0..len], is_relative, out_buffer); }, - IO_REPARSE_TAG_MOUNT_POINT => { + @as(IoReparseTagInt, @bitCast(IO_REPARSE_TAG.MOUNT_POINT)) => { const buf: *const MOUNT_POINT_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0])); const offset = buf.SubstituteNameOffset >> 1; const len = buf.SubstituteNameLength >> 1; const path_buf = @as([*]const u16, &buf.PathBuffer); return parseReadLinkPath(path_buf[offset..][0..len], false, out_buffer); }, - else => { - return error.UnsupportedReparsePointType; - }, + else => return error.UnsupportedReparsePointType, } } @@ -956,13 +3215,8 @@ pub const DeleteFileOptions = struct { }; pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFileError!void { - const create_options_flags: ULONG = if (options.remove_dir) - FILE_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT - else - FILE_NON_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT; // would we ever want to delete the target instead? - const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2)); - var nt_name = UNICODE_STRING{ + var nt_name: UNICODE_STRING = .{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, // The Windows API makes this mutable, but it will not mutate here. @@ -978,26 +3232,32 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil return error.FileBusy; } - var attr = OBJECT_ATTRIBUTES{ - .Length = @sizeOf(OBJECT_ATTRIBUTES), - .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else options.dir, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. - .ObjectName = &nt_name, - .SecurityDescriptor = null, - .SecurityQualityOfService = null, - }; var io: IO_STATUS_BLOCK = undefined; var tmp_handle: HANDLE = undefined; var rc = ntdll.NtCreateFile( &tmp_handle, - SYNCHRONIZE | DELETE, - &attr, + .{ .STANDARD = .{ + .RIGHTS = .{ .DELETE = true }, + .SYNCHRONIZE = true, + } }, + &.{ + .Length = @sizeOf(OBJECT_ATTRIBUTES), + .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else options.dir, + .Attributes = .{}, + .ObjectName = &nt_name, + .SecurityDescriptor = null, + .SecurityQualityOfService = null, + }, &io, null, - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - FILE_OPEN, - create_options_flags, + .{}, + .VALID_FLAGS, + .OPEN, + .{ + .DIRECTORY_FILE = options.remove_dir, + .NON_DIRECTORY_FILE = !options.remove_dir, + .OPEN_REPARSE_POINT = true, // would we ever want to delete the target instead? 
+ }, null, 0, ); @@ -1031,18 +3291,17 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil // FileDispositionInformation if the return value lets us know that some aspect of it is not supported. const need_fallback = need_fallback: { // Deletion with posix semantics if the filesystem supports it. - var info = FILE_DISPOSITION_INFORMATION_EX{ - .Flags = FILE_DISPOSITION_DELETE | - FILE_DISPOSITION_POSIX_SEMANTICS | - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE, - }; - + const info: FILE.DISPOSITION.INFORMATION.EX = .{ .Flags = .{ + .DELETE = true, + .POSIX_SEMANTICS = true, + .IGNORE_READONLY_ATTRIBUTE = true, + } }; rc = ntdll.NtSetInformationFile( tmp_handle, &io, &info, - @sizeOf(FILE_DISPOSITION_INFORMATION_EX), - .FileDispositionInformationEx, + @sizeOf(FILE.DISPOSITION.INFORMATION.EX), + .DispositionEx, ); switch (rc) { .SUCCESS => return, @@ -1061,16 +3320,15 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil if (need_fallback) { // Deletion with file pending semantics, which requires waiting or moving // files to get them removed (from here). - var file_dispo = FILE_DISPOSITION_INFORMATION{ + const file_dispo: FILE.DISPOSITION.INFORMATION = .{ .DeleteFile = TRUE, }; - rc = ntdll.NtSetInformationFile( tmp_handle, &io, &file_dispo, - @sizeOf(FILE_DISPOSITION_INFORMATION), - .FileDispositionInformation, + @sizeOf(FILE.DISPOSITION.INFORMATION), + .Disposition, ); } switch (rc) { @@ -1112,8 +3370,14 @@ pub fn RenameFile( ) RenameError!void { const src_fd = OpenFile(old_path_w, .{ .dir = old_dir_fd, - .access_mask = SYNCHRONIZE | GENERIC_WRITE | DELETE, - .creation = FILE_OPEN, + .access_mask = .{ + .STANDARD = .{ + .RIGHTS = .{ .DELETE = true }, + .SYNCHRONIZE = true, + }, + .GENERIC = .{ .WRITE = true }, + }, + .creation = .OPEN, .filter = .any, // This function is supposed to rename both files and directories. .follow_symlinks = false, }) catch |err| switch (err) { @@ -1135,29 +3399,23 @@ pub fn RenameFile( // The strategy here is just to try using FileRenameInformationEx and fall back to // FileRenameInformation if the return value lets us know that some aspect of it is not supported. 
const need_fallback = need_fallback: { - const struct_buf_len = @sizeOf(FILE_RENAME_INFORMATION_EX) + (PATH_MAX_WIDE * 2); - var rename_info_buf: [struct_buf_len]u8 align(@alignOf(FILE_RENAME_INFORMATION_EX)) = undefined; - const struct_len = @sizeOf(FILE_RENAME_INFORMATION_EX) + new_path_w.len * 2; - if (struct_len > struct_buf_len) return error.NameTooLong; - - const rename_info: *FILE_RENAME_INFORMATION_EX = @ptrCast(&rename_info_buf); - var io_status_block: IO_STATUS_BLOCK = undefined; - - var flags: ULONG = FILE_RENAME_POSIX_SEMANTICS | FILE_RENAME_IGNORE_READONLY_ATTRIBUTE; - if (replace_if_exists) flags |= FILE_RENAME_REPLACE_IF_EXISTS; - rename_info.* = .{ - .Flags = flags, + const rename_info: FILE.RENAME_INFORMATION = .init(.{ + .Flags = .{ + .REPLACE_IF_EXISTS = replace_if_exists, + .POSIX_SEMANTICS = true, + .IGNORE_READONLY_ATTRIBUTE = true, + }, .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd, - .FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong - .FileName = undefined, - }; - @memcpy((&rename_info.FileName).ptr, new_path_w); + .FileName = new_path_w, + }); + var io_status_block: IO_STATUS_BLOCK = undefined; + const rename_info_buf = rename_info.toBuffer(); rc = ntdll.NtSetInformationFile( src_fd, &io_status_block, - rename_info, - @intCast(struct_len), // already checked for error.NameTooLong - .FileRenameInformationEx, + rename_info_buf.ptr, + @intCast(rename_info_buf.len), // already checked for error.NameTooLong + .RenameEx, ); switch (rc) { .SUCCESS => return, @@ -1174,28 +3432,19 @@ pub fn RenameFile( }; if (need_fallback) { - const struct_buf_len = @sizeOf(FILE_RENAME_INFORMATION) + (PATH_MAX_WIDE * 2); - var rename_info_buf: [struct_buf_len]u8 align(@alignOf(FILE_RENAME_INFORMATION)) = undefined; - const struct_len = @sizeOf(FILE_RENAME_INFORMATION) + new_path_w.len * 2; - if (struct_len > struct_buf_len) return error.NameTooLong; - - const rename_info: *FILE_RENAME_INFORMATION = @ptrCast(&rename_info_buf); - var io_status_block: IO_STATUS_BLOCK = undefined; - - rename_info.* = .{ - .Flags = @intFromBool(replace_if_exists), + const rename_info: FILE.RENAME_INFORMATION = .init(.{ + .Flags = .{ .REPLACE_IF_EXISTS = replace_if_exists }, .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir_fd, - .FileNameLength = @intCast(new_path_w.len * 2), // already checked error.NameTooLong - .FileName = undefined, - }; - @memcpy((&rename_info.FileName).ptr, new_path_w); - + .FileName = new_path_w, + }); + var io_status_block: IO_STATUS_BLOCK = undefined; + const rename_info_buf = rename_info.toBuffer(); rc = ntdll.NtSetInformationFile( src_fd, &io_status_block, - rename_info, - @intCast(struct_len), // already checked for error.NameTooLong - .FileRenameInformation, + rename_info_buf.ptr, + @intCast(rename_info_buf.len), // already checked for error.NameTooLong + .Rename, ); } @@ -1308,7 +3557,7 @@ pub fn QueryObjectName(handle: HANDLE, out_buffer: []u16) QueryObjectNameError![ const info = @as(*OBJECT_NAME_INFORMATION, @ptrCast(out_buffer_aligned)); // buffer size is specified in bytes - const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG); + const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse maxInt(ULONG); // last argument would return the length required for full_buffer, not exposed here return switch (ntdll.NtQueryObject(handle, .ObjectNameInformation, info, out_buffer_len, null)) { .SUCCESS => 
blk: { @@ -1440,9 +3689,8 @@ pub fn GetFinalPathNameByHandle( // This is the NT namespaced version of \\.\MountPointManager const mgmt_path_u16 = std.unicode.utf8ToUtf16LeStringLiteral("\\??\\MountPointManager"); const mgmt_handle = OpenFile(mgmt_path_u16, .{ - .access_mask = SYNCHRONIZE, - .share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - .creation = FILE_OPEN, + .access_mask = .{ .STANDARD = .{ .SYNCHRONIZE = true } }, + .creation = .OPEN, }) catch |err| switch (err) { error.IsDir => return error.Unexpected, error.NotDir => return error.Unexpected, @@ -1462,10 +3710,14 @@ pub fn GetFinalPathNameByHandle( input_struct.DeviceNameLength = @intCast(volume_name_u16.len * 2); @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @as([*]const u8, @ptrCast(volume_name_u16.ptr))); - DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) { - error.AccessDenied => return error.Unexpected, - else => |e| return e, - }; + { + const rc = DeviceIoControl(mgmt_handle, IOCTL.MOUNTMGR.QUERY_POINTS, .{ .in = &input_buf, .out = &output_buf }); + switch (rc) { + .SUCCESS => {}, + .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, + else => return unexpectedStatus(rc), + } + } const mount_points_struct: *const MOUNTMGR_MOUNT_POINTS = @ptrCast(&output_buf[0]); const mount_points = @as( @@ -1517,10 +3769,12 @@ pub fn GetFinalPathNameByHandle( vol_input_struct.DeviceNameLength = @intCast(symlink.len * 2); @memcpy(@as([*]WCHAR, &vol_input_struct.DeviceName)[0..symlink.len], symlink); - DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_DOS_VOLUME_PATH, &vol_input_buf, &vol_output_buf) catch |err| switch (err) { - error.AccessDenied => return error.Unexpected, - else => |e| return e, - }; + const rc = DeviceIoControl(mgmt_handle, IOCTL.MOUNTMGR.QUERY_DOS_VOLUME_PATH, .{ .in = &vol_input_buf, .out = &vol_output_buf }); + switch (rc) { + .SUCCESS => {}, + .UNRECOGNIZED_VOLUME => return error.UnrecognizedVolume, + else => return unexpectedStatus(rc), + } const volume_paths_struct: *const MOUNTMGR_VOLUME_PATHS = @ptrCast(&vol_output_buf[0]); const volume_path = std.mem.sliceTo(@as( [*]const u16, @@ -1758,7 +4012,7 @@ pub fn VirtualProtect(lpAddress: ?LPVOID, dwSize: SIZE_T, flNewProtect: DWORD, l // ntdll takes an extra level of indirection here var addr = lpAddress; var size = dwSize; - switch (ntdll.NtProtectVirtualMemory(self_process_handle, &addr, &size, flNewProtect, lpflOldProtect)) { + switch (ntdll.NtProtectVirtualMemory(GetCurrentProcess(), &addr, &size, flNewProtect, lpflOldProtect)) { .SUCCESS => {}, .INVALID_ADDRESS => return error.InvalidAddress, else => |st| return unexpectedStatus(st), @@ -2018,7 +4272,7 @@ pub const LockFileError = error{ pub fn LockFile( FileHandle: HANDLE, Event: ?HANDLE, - ApcRoutine: ?*IO_APC_ROUTINE, + ApcRoutine: ?*const IO_APC_ROUTINE, ApcContext: ?*anyopaque, IoStatusBlock: *IO_STATUS_BLOCK, ByteOffset: *const LARGE_INTEGER, @@ -2057,7 +4311,7 @@ pub fn UnlockFile( IoStatusBlock: *IO_STATUS_BLOCK, ByteOffset: *const LARGE_INTEGER, Length: *const LARGE_INTEGER, - Key: ?*ULONG, + Key: ULONG, ) !void { const rc = ntdll.NtUnlockFile(FileHandle, IoStatusBlock, ByteOffset, Length, Key); switch (rc) { @@ -2168,13 +4422,13 @@ pub fn eqlIgnoreCaseWtf16(a: []const u16, b: []const u16) bool { // Use RtlEqualUnicodeString on Windows when not in comptime to avoid including a // redundant copy of the uppercase data. 
const a_bytes = @as(u16, @intCast(a.len * 2)); - const a_string = UNICODE_STRING{ + const a_string: UNICODE_STRING = .{ .Length = a_bytes, .MaximumLength = a_bytes, .Buffer = @constCast(a.ptr), }; const b_bytes = @as(u16, @intCast(b.len * 2)); - const b_string = UNICODE_STRING{ + const b_string: UNICODE_STRING = .{ .Length = b_bytes, .MaximumLength = b_bytes, .Buffer = @constCast(b.ptr), @@ -2206,7 +4460,7 @@ pub fn eqlIgnoreCaseWtf8(a: []const u8, b: []const u8) bool { const a_cp = a_wtf8_it.nextCodepoint() orelse break; const b_cp = b_wtf8_it.nextCodepoint() orelse return false; - if (a_cp <= std.math.maxInt(u16) and b_cp <= std.math.maxInt(u16)) { + if (a_cp <= maxInt(u16) and b_cp <= maxInt(u16)) { if (a_cp != b_cp and upcaseImpl(@intCast(a_cp)) != upcaseImpl(@intCast(b_cp))) { return false; } @@ -2783,7 +5037,10 @@ pub fn unexpectedWSAError(err: ws2_32.WinsockError) UnexpectedError { /// and you get an unexpected status. pub fn unexpectedStatus(status: NTSTATUS) UnexpectedError { if (std.posix.unexpected_error_tracing) { - std.debug.print("error.Unexpected NTSTATUS=0x{x}\n", .{@intFromEnum(status)}); + std.debug.print("error.Unexpected NTSTATUS=0x{x} ({s})\n", .{ + @intFromEnum(status), + std.enums.tagName(NTSTATUS, status) orelse "<unnamed>", + }); std.debug.dumpCurrentStackTrace(.{ .first_address = @returnAddress() }); } return error.Unexpected; @@ -2791,20 +5048,25 @@ pub fn unexpectedStatus(status: NTSTATUS) UnexpectedError { pub fn statusBug(status: NTSTATUS) UnexpectedError { switch (builtin.mode) { - .Debug => std.debug.panic("programmer bug caused syscall status: {t}", .{status}), + .Debug => std.debug.panic("programmer bug caused syscall status: 0x{x} ({s})", .{ + @intFromEnum(status), + std.enums.tagName(NTSTATUS, status) orelse "<unnamed>", + }), else => return error.Unexpected, } } pub fn errorBug(err: Win32Error) UnexpectedError { switch (builtin.mode) { - .Debug => std.debug.panic("programmer bug caused syscall status: {t}", .{err}), + .Debug => std.debug.panic("programmer bug caused syscall error: 0x{x} ({s})", .{ + @intFromEnum(err), + std.enums.tagName(Win32Error, err) orelse "<unnamed>", + }), else => return error.Unexpected, } } pub const Win32Error = @import("windows/win32error.zig").Win32Error; -pub const NTSTATUS = @import("windows/ntstatus.zig").NTSTATUS; pub const LANG = @import("windows/lang.zig"); pub const SUBLANG = @import("windows/sublang.zig"); @@ -2885,217 +5147,9 @@ pub const PCTSTR = @compileError("Deprecated: choose between `PCSTR` or `PCWSTR` pub const TRUE = 1; pub const FALSE = 0; -pub const DEVICE_TYPE = ULONG; -pub const FILE_DEVICE_BEEP: DEVICE_TYPE = 0x0001; -pub const FILE_DEVICE_CD_ROM: DEVICE_TYPE = 0x0002; -pub const FILE_DEVICE_CD_ROM_FILE_SYSTEM: DEVICE_TYPE = 0x0003; -pub const FILE_DEVICE_CONTROLLER: DEVICE_TYPE = 0x0004; -pub const FILE_DEVICE_DATALINK: DEVICE_TYPE = 0x0005; -pub const FILE_DEVICE_DFS: DEVICE_TYPE = 0x0006; -pub const FILE_DEVICE_DISK: DEVICE_TYPE = 0x0007; -pub const FILE_DEVICE_DISK_FILE_SYSTEM: DEVICE_TYPE = 0x0008; -pub const FILE_DEVICE_FILE_SYSTEM: DEVICE_TYPE = 0x0009; -pub const FILE_DEVICE_INPORT_PORT: DEVICE_TYPE = 0x000a; -pub const FILE_DEVICE_KEYBOARD: DEVICE_TYPE = 0x000b; -pub const FILE_DEVICE_MAILSLOT: DEVICE_TYPE = 0x000c; -pub const FILE_DEVICE_MIDI_IN: DEVICE_TYPE = 0x000d; -pub const FILE_DEVICE_MIDI_OUT: DEVICE_TYPE = 0x000e; -pub const FILE_DEVICE_MOUSE: DEVICE_TYPE = 0x000f; -pub const FILE_DEVICE_MULTI_UNC_PROVIDER: DEVICE_TYPE = 0x0010; -pub const FILE_DEVICE_NAMED_PIPE: DEVICE_TYPE = 0x0011; 
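
// For reference: these numeric device types now sit behind the typed `CTL_CODE.FILE_DEVICE`
// field (see the packed `DEVICE_TYPE` earlier in this diff), so the FSCTL-versus-IOCTL
// decision in `DeviceIoControl` reads a field instead of shifting bits out of a ULONG.
// A minimal sketch, with `code` assumed to be a `CTL_CODE` value:

const is_fsctl = switch (code.DeviceType) {
    .FILE_SYSTEM, .NAMED_PIPE => true,
    else => false,
};
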
-pub const FILE_DEVICE_NETWORK: DEVICE_TYPE = 0x0012; -pub const FILE_DEVICE_NETWORK_BROWSER: DEVICE_TYPE = 0x0013; -pub const FILE_DEVICE_NETWORK_FILE_SYSTEM: DEVICE_TYPE = 0x0014; -pub const FILE_DEVICE_NULL: DEVICE_TYPE = 0x0015; -pub const FILE_DEVICE_PARALLEL_PORT: DEVICE_TYPE = 0x0016; -pub const FILE_DEVICE_PHYSICAL_NETCARD: DEVICE_TYPE = 0x0017; -pub const FILE_DEVICE_PRINTER: DEVICE_TYPE = 0x0018; -pub const FILE_DEVICE_SCANNER: DEVICE_TYPE = 0x0019; -pub const FILE_DEVICE_SERIAL_MOUSE_PORT: DEVICE_TYPE = 0x001a; -pub const FILE_DEVICE_SERIAL_PORT: DEVICE_TYPE = 0x001b; -pub const FILE_DEVICE_SCREEN: DEVICE_TYPE = 0x001c; -pub const FILE_DEVICE_SOUND: DEVICE_TYPE = 0x001d; -pub const FILE_DEVICE_STREAMS: DEVICE_TYPE = 0x001e; -pub const FILE_DEVICE_TAPE: DEVICE_TYPE = 0x001f; -pub const FILE_DEVICE_TAPE_FILE_SYSTEM: DEVICE_TYPE = 0x0020; -pub const FILE_DEVICE_TRANSPORT: DEVICE_TYPE = 0x0021; -pub const FILE_DEVICE_UNKNOWN: DEVICE_TYPE = 0x0022; -pub const FILE_DEVICE_VIDEO: DEVICE_TYPE = 0x0023; -pub const FILE_DEVICE_VIRTUAL_DISK: DEVICE_TYPE = 0x0024; -pub const FILE_DEVICE_WAVE_IN: DEVICE_TYPE = 0x0025; -pub const FILE_DEVICE_WAVE_OUT: DEVICE_TYPE = 0x0026; -pub const FILE_DEVICE_8042_PORT: DEVICE_TYPE = 0x0027; -pub const FILE_DEVICE_NETWORK_REDIRECTOR: DEVICE_TYPE = 0x0028; -pub const FILE_DEVICE_BATTERY: DEVICE_TYPE = 0x0029; -pub const FILE_DEVICE_BUS_EXTENDER: DEVICE_TYPE = 0x002a; -pub const FILE_DEVICE_MODEM: DEVICE_TYPE = 0x002b; -pub const FILE_DEVICE_VDM: DEVICE_TYPE = 0x002c; -pub const FILE_DEVICE_MASS_STORAGE: DEVICE_TYPE = 0x002d; -pub const FILE_DEVICE_SMB: DEVICE_TYPE = 0x002e; -pub const FILE_DEVICE_KS: DEVICE_TYPE = 0x002f; -pub const FILE_DEVICE_CHANGER: DEVICE_TYPE = 0x0030; -pub const FILE_DEVICE_SMARTCARD: DEVICE_TYPE = 0x0031; -pub const FILE_DEVICE_ACPI: DEVICE_TYPE = 0x0032; -pub const FILE_DEVICE_DVD: DEVICE_TYPE = 0x0033; -pub const FILE_DEVICE_FULLSCREEN_VIDEO: DEVICE_TYPE = 0x0034; -pub const FILE_DEVICE_DFS_FILE_SYSTEM: DEVICE_TYPE = 0x0035; -pub const FILE_DEVICE_DFS_VOLUME: DEVICE_TYPE = 0x0036; -pub const FILE_DEVICE_SERENUM: DEVICE_TYPE = 0x0037; -pub const FILE_DEVICE_TERMSRV: DEVICE_TYPE = 0x0038; -pub const FILE_DEVICE_KSEC: DEVICE_TYPE = 0x0039; -pub const FILE_DEVICE_FIPS: DEVICE_TYPE = 0x003a; -pub const FILE_DEVICE_INFINIBAND: DEVICE_TYPE = 0x003b; -// TODO: missing values? 
-pub const FILE_DEVICE_VMBUS: DEVICE_TYPE = 0x003e; -pub const FILE_DEVICE_CRYPT_PROVIDER: DEVICE_TYPE = 0x003f; -pub const FILE_DEVICE_WPD: DEVICE_TYPE = 0x0040; -pub const FILE_DEVICE_BLUETOOTH: DEVICE_TYPE = 0x0041; -pub const FILE_DEVICE_MT_COMPOSITE: DEVICE_TYPE = 0x0042; -pub const FILE_DEVICE_MT_TRANSPORT: DEVICE_TYPE = 0x0043; -pub const FILE_DEVICE_BIOMETRIC: DEVICE_TYPE = 0x0044; -pub const FILE_DEVICE_PMI: DEVICE_TYPE = 0x0045; -pub const FILE_DEVICE_EHSTOR: DEVICE_TYPE = 0x0046; -pub const FILE_DEVICE_DEVAPI: DEVICE_TYPE = 0x0047; -pub const FILE_DEVICE_GPIO: DEVICE_TYPE = 0x0048; -pub const FILE_DEVICE_USBEX: DEVICE_TYPE = 0x0049; -pub const FILE_DEVICE_CONSOLE: DEVICE_TYPE = 0x0050; -pub const FILE_DEVICE_NFP: DEVICE_TYPE = 0x0051; -pub const FILE_DEVICE_SYSENV: DEVICE_TYPE = 0x0052; -pub const FILE_DEVICE_VIRTUAL_BLOCK: DEVICE_TYPE = 0x0053; -pub const FILE_DEVICE_POINT_OF_SERVICE: DEVICE_TYPE = 0x0054; -pub const FILE_DEVICE_STORAGE_REPLICATION: DEVICE_TYPE = 0x0055; -pub const FILE_DEVICE_TRUST_ENV: DEVICE_TYPE = 0x0056; -pub const FILE_DEVICE_UCM: DEVICE_TYPE = 0x0057; -pub const FILE_DEVICE_UCMTCPCI: DEVICE_TYPE = 0x0058; -pub const FILE_DEVICE_PERSISTENT_MEMORY: DEVICE_TYPE = 0x0059; -pub const FILE_DEVICE_NVDIMM: DEVICE_TYPE = 0x005a; -pub const FILE_DEVICE_HOLOGRAPHIC: DEVICE_TYPE = 0x005b; -pub const FILE_DEVICE_SDFXHCI: DEVICE_TYPE = 0x005c; - -/// https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/buffer-descriptions-for-i-o-control-codes -pub const TransferType = enum(u2) { - METHOD_BUFFERED = 0, - METHOD_IN_DIRECT = 1, - METHOD_OUT_DIRECT = 2, - METHOD_NEITHER = 3, -}; - -pub const FILE_ANY_ACCESS = 0; -pub const FILE_READ_ACCESS = 1; -pub const FILE_WRITE_ACCESS = 2; - -/// https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/defining-i-o-control-codes -pub fn CTL_CODE(deviceType: u16, function: u12, method: TransferType, access: u2) DWORD { - return (@as(DWORD, deviceType) << 16) | - (@as(DWORD, access) << 14) | - (@as(DWORD, function) << 2) | - @intFromEnum(method); -} - -pub const INVALID_HANDLE_VALUE = @as(HANDLE, @ptrFromInt(maxInt(usize))); +pub const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(maxInt(usize)); -pub const INVALID_FILE_ATTRIBUTES = @as(DWORD, maxInt(DWORD)); - -pub const FILE_ALL_INFORMATION = extern struct { - BasicInformation: FILE_BASIC_INFORMATION, - StandardInformation: FILE_STANDARD_INFORMATION, - InternalInformation: FILE_INTERNAL_INFORMATION, - EaInformation: FILE_EA_INFORMATION, - AccessInformation: FILE_ACCESS_INFORMATION, - PositionInformation: FILE_POSITION_INFORMATION, - ModeInformation: FILE_MODE_INFORMATION, - AlignmentInformation: FILE_ALIGNMENT_INFORMATION, - NameInformation: FILE_NAME_INFORMATION, -}; - -pub const FILE_BASIC_INFORMATION = extern struct { - CreationTime: LARGE_INTEGER, - LastAccessTime: LARGE_INTEGER, - LastWriteTime: LARGE_INTEGER, - ChangeTime: LARGE_INTEGER, - FileAttributes: ULONG, -}; - -pub const FILE_STANDARD_INFORMATION = extern struct { - AllocationSize: LARGE_INTEGER, - EndOfFile: LARGE_INTEGER, - NumberOfLinks: ULONG, - DeletePending: BOOLEAN, - Directory: BOOLEAN, -}; - -pub const FILE_INTERNAL_INFORMATION = extern struct { - IndexNumber: LARGE_INTEGER, -}; - -pub const FILE_EA_INFORMATION = extern struct { - EaSize: ULONG, -}; - -pub const FILE_ACCESS_INFORMATION = extern struct { - AccessFlags: ACCESS_MASK, -}; - -pub const FILE_POSITION_INFORMATION = extern struct { - CurrentByteOffset: LARGE_INTEGER, -}; - -pub const FILE_END_OF_FILE_INFORMATION = extern struct { - 
EndOfFile: LARGE_INTEGER, -}; - -pub const FILE_MODE_INFORMATION = extern struct { - Mode: ULONG, -}; - -pub const FILE_ALIGNMENT_INFORMATION = extern struct { - AlignmentRequirement: ULONG, -}; - -pub const FILE_NAME_INFORMATION = extern struct { - FileNameLength: ULONG, - FileName: [1]WCHAR, -}; - -pub const FILE_DISPOSITION_INFORMATION_EX = extern struct { - /// combination of FILE_DISPOSITION_* flags - Flags: ULONG, -}; - -pub const FILE_DISPOSITION_DO_NOT_DELETE: ULONG = 0x00000000; -pub const FILE_DISPOSITION_DELETE: ULONG = 0x00000001; -pub const FILE_DISPOSITION_POSIX_SEMANTICS: ULONG = 0x00000002; -pub const FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK: ULONG = 0x00000004; -pub const FILE_DISPOSITION_ON_CLOSE: ULONG = 0x00000008; -pub const FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE: ULONG = 0x00000010; - -// FILE_RENAME_INFORMATION.Flags -pub const FILE_RENAME_REPLACE_IF_EXISTS = 0x00000001; -pub const FILE_RENAME_POSIX_SEMANTICS = 0x00000002; -pub const FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE = 0x00000004; -pub const FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008; -pub const FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE = 0x00000010; -pub const FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE = 0x00000020; -pub const FILE_RENAME_PRESERVE_AVAILABLE_SPACE = 0x00000030; -pub const FILE_RENAME_IGNORE_READONLY_ATTRIBUTE = 0x00000040; -pub const FILE_RENAME_FORCE_RESIZE_TARGET_SR = 0x00000080; -pub const FILE_RENAME_FORCE_RESIZE_SOURCE_SR = 0x00000100; -pub const FILE_RENAME_FORCE_RESIZE_SR = 0x00000180; - -pub const FILE_RENAME_INFORMATION = extern struct { - Flags: BOOLEAN, - RootDirectory: ?HANDLE, - FileNameLength: ULONG, - FileName: [1]WCHAR, -}; - -// FileRenameInformationEx (since .win10_rs1) -pub const FILE_RENAME_INFORMATION_EX = extern struct { - Flags: ULONG, - RootDirectory: ?HANDLE, - FileNameLength: ULONG, - FileName: [1]WCHAR, -}; +pub const INVALID_FILE_ATTRIBUTES: DWORD = maxInt(DWORD); pub const IO_STATUS_BLOCK = extern struct { // "DUMMYUNIONNAME" expands to "u" @@ -3106,130 +5160,6 @@ pub const IO_STATUS_BLOCK = extern struct { Information: ULONG_PTR, }; -pub const FILE_INFORMATION_CLASS = enum(c_int) { - FileDirectoryInformation = 1, - FileFullDirectoryInformation, - FileBothDirectoryInformation, - FileBasicInformation, - FileStandardInformation, - FileInternalInformation, - FileEaInformation, - FileAccessInformation, - FileNameInformation, - FileRenameInformation, - FileLinkInformation, - FileNamesInformation, - FileDispositionInformation, - FilePositionInformation, - FileFullEaInformation, - FileModeInformation, - FileAlignmentInformation, - FileAllInformation, - FileAllocationInformation, - FileEndOfFileInformation, - FileAlternateNameInformation, - FileStreamInformation, - FilePipeInformation, - FilePipeLocalInformation, - FilePipeRemoteInformation, - FileMailslotQueryInformation, - FileMailslotSetInformation, - FileCompressionInformation, - FileObjectIdInformation, - FileCompletionInformation, - FileMoveClusterInformation, - FileQuotaInformation, - FileReparsePointInformation, - FileNetworkOpenInformation, - FileAttributeTagInformation, - FileTrackingInformation, - FileIdBothDirectoryInformation, - FileIdFullDirectoryInformation, - FileValidDataLengthInformation, - FileShortNameInformation, - FileIoCompletionNotificationInformation, - FileIoStatusBlockRangeInformation, - FileIoPriorityHintInformation, - FileSfioReserveInformation, - FileSfioVolumeInformation, - FileHardLinkInformation, - FileProcessIdsUsingFileInformation, - FileNormalizedNameInformation, - 
FileNetworkPhysicalNameInformation, - FileIdGlobalTxDirectoryInformation, - FileIsRemoteDeviceInformation, - FileUnusedInformation, - FileNumaNodeInformation, - FileStandardLinkInformation, - FileRemoteProtocolInformation, - FileRenameInformationBypassAccessCheck, - FileLinkInformationBypassAccessCheck, - FileVolumeNameInformation, - FileIdInformation, - FileIdExtdDirectoryInformation, - FileReplaceCompletionInformation, - FileHardLinkFullIdInformation, - FileIdExtdBothDirectoryInformation, - FileDispositionInformationEx, - FileRenameInformationEx, - FileRenameInformationExBypassAccessCheck, - FileDesiredStorageClassInformation, - FileStatInformation, - FileMemoryPartitionInformation, - FileStatLxInformation, - FileCaseSensitiveInformation, - FileLinkInformationEx, - FileLinkInformationExBypassAccessCheck, - FileStorageReserveIdInformation, - FileCaseSensitiveInformationForceAccessCheck, - FileMaximumInformation, -}; - -pub const FILE_ATTRIBUTE_TAG_INFO = extern struct { - FileAttributes: DWORD, - ReparseTag: DWORD, -}; - -/// "If this bit is set, the file or directory represents another named entity in the system." -/// https://learn.microsoft.com/en-us/windows/win32/fileio/reparse-point-tags -pub const reparse_tag_name_surrogate_bit = 0x20000000; - -pub const FILE_DISPOSITION_INFORMATION = extern struct { - DeleteFile: BOOLEAN, -}; - -pub const FILE_FS_DEVICE_INFORMATION = extern struct { - DeviceType: DEVICE_TYPE, - Characteristics: ULONG, -}; - -pub const FILE_FS_VOLUME_INFORMATION = extern struct { - VolumeCreationTime: LARGE_INTEGER, - VolumeSerialNumber: ULONG, - VolumeLabelLength: ULONG, - SupportsObjects: BOOLEAN, - // Flexible array member - VolumeLabel: [1]WCHAR, -}; - -pub const FS_INFORMATION_CLASS = enum(c_int) { - FileFsVolumeInformation = 1, - FileFsLabelInformation, - FileFsSizeInformation, - FileFsDeviceInformation, - FileFsAttributeInformation, - FileFsControlInformation, - FileFsFullSizeInformation, - FileFsObjectIdInformation, - FileFsDriverPathInformation, - FileFsVolumeFlagsInformation, - FileFsSectorSizeInformation, - FileFsDataCopyInformation, - FileFsMetadataSizeInformation, - FileFsFullSizeInformationEx, - FileFsMaximumInformation, -}; - pub const OVERLAPPED = extern struct { Internal: ULONG_PTR, InternalHigh: ULONG_PTR, @@ -3331,129 +5261,16 @@ pub const PIPE_READMODE_MESSAGE = 0x00000002; pub const PIPE_WAIT = 0x00000000; pub const PIPE_NOWAIT = 0x00000001; -pub const GENERIC_READ = 0x80000000; -pub const GENERIC_WRITE = 0x40000000; -pub const GENERIC_EXECUTE = 0x20000000; -pub const GENERIC_ALL = 0x10000000; - -pub const FILE_SHARE_DELETE = 0x00000004; -pub const FILE_SHARE_READ = 0x00000001; -pub const FILE_SHARE_WRITE = 0x00000002; - -pub const DELETE = 0x00010000; -pub const READ_CONTROL = 0x00020000; -pub const WRITE_DAC = 0x00040000; -pub const WRITE_OWNER = 0x00080000; -pub const SYNCHRONIZE = 0x00100000; -pub const STANDARD_RIGHTS_READ = READ_CONTROL; -pub const STANDARD_RIGHTS_WRITE = READ_CONTROL; -pub const STANDARD_RIGHTS_EXECUTE = READ_CONTROL; -pub const STANDARD_RIGHTS_REQUIRED = DELETE | READ_CONTROL | WRITE_DAC | WRITE_OWNER; -pub const MAXIMUM_ALLOWED = 0x02000000; - -// disposition for NtCreateFile -pub const FILE_SUPERSEDE = 0; -pub const FILE_OPEN = 1; -pub const FILE_CREATE = 2; -pub const FILE_OPEN_IF = 3; -pub const FILE_OVERWRITE = 4; -pub const FILE_OVERWRITE_IF = 5; -pub const FILE_MAXIMUM_DISPOSITION = 5; - -// flags for NtCreateFile and NtOpenFile -pub const FILE_READ_DATA = 0x00000001; -pub const FILE_LIST_DIRECTORY = 0x00000001; 
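
// For reference: these untyped access, share, and create-disposition masks are superseded by
// the packed `ACCESS_MASK`, `FILE.SHARE`, and `FILE.CREATE_DISPOSITION` forms used throughout
// this patch. A minimal sketch of the common `GENERIC_READ | SYNCHRONIZE` request with full
// sharing and `FILE_OPEN` semantics:

const desired_access: ACCESS_MASK = .{
    .STANDARD = .{ .SYNCHRONIZE = true },
    .GENERIC = .{ .READ = true },
};
const share_mode: FILE.SHARE = .VALID_FLAGS; // the new default in `OpenFileOptions`
const disposition: FILE.CREATE_DISPOSITION = .OPEN;
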
-pub const FILE_WRITE_DATA = 0x00000002; -pub const FILE_ADD_FILE = 0x00000002; -pub const FILE_APPEND_DATA = 0x00000004; -pub const FILE_ADD_SUBDIRECTORY = 0x00000004; -pub const FILE_CREATE_PIPE_INSTANCE = 0x00000004; -pub const FILE_READ_EA = 0x00000008; -pub const FILE_WRITE_EA = 0x00000010; -pub const FILE_EXECUTE = 0x00000020; -pub const FILE_TRAVERSE = 0x00000020; -pub const FILE_DELETE_CHILD = 0x00000040; -pub const FILE_READ_ATTRIBUTES = 0x00000080; -pub const FILE_WRITE_ATTRIBUTES = 0x00000100; - -pub const FILE_DIRECTORY_FILE = 0x00000001; -pub const FILE_WRITE_THROUGH = 0x00000002; -pub const FILE_SEQUENTIAL_ONLY = 0x00000004; -pub const FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008; -pub const FILE_SYNCHRONOUS_IO_ALERT = 0x00000010; -pub const FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020; -pub const FILE_NON_DIRECTORY_FILE = 0x00000040; -pub const FILE_CREATE_TREE_CONNECTION = 0x00000080; -pub const FILE_COMPLETE_IF_OPLOCKED = 0x00000100; -pub const FILE_NO_EA_KNOWLEDGE = 0x00000200; -pub const FILE_OPEN_FOR_RECOVERY = 0x00000400; -pub const FILE_RANDOM_ACCESS = 0x00000800; -pub const FILE_DELETE_ON_CLOSE = 0x00001000; -pub const FILE_OPEN_BY_FILE_ID = 0x00002000; -pub const FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000; -pub const FILE_NO_COMPRESSION = 0x00008000; -pub const FILE_RESERVE_OPFILTER = 0x00100000; -pub const FILE_OPEN_REPARSE_POINT = 0x00200000; -pub const FILE_OPEN_OFFLINE_FILE = 0x00400000; -pub const FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000; - pub const CREATE_ALWAYS = 2; pub const CREATE_NEW = 1; pub const OPEN_ALWAYS = 4; pub const OPEN_EXISTING = 3; pub const TRUNCATE_EXISTING = 5; -pub const FILE_ATTRIBUTE_ARCHIVE = 0x20; -pub const FILE_ATTRIBUTE_COMPRESSED = 0x800; -pub const FILE_ATTRIBUTE_DEVICE = 0x40; -pub const FILE_ATTRIBUTE_DIRECTORY = 0x10; -pub const FILE_ATTRIBUTE_ENCRYPTED = 0x4000; -pub const FILE_ATTRIBUTE_HIDDEN = 0x2; -pub const FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x8000; -pub const FILE_ATTRIBUTE_NORMAL = 0x80; -pub const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000; -pub const FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x20000; -pub const FILE_ATTRIBUTE_OFFLINE = 0x1000; -pub const FILE_ATTRIBUTE_READONLY = 0x1; -pub const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x400000; -pub const FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x40000; -pub const FILE_ATTRIBUTE_REPARSE_POINT = 0x400; -pub const FILE_ATTRIBUTE_SPARSE_FILE = 0x200; -pub const FILE_ATTRIBUTE_SYSTEM = 0x4; -pub const FILE_ATTRIBUTE_TEMPORARY = 0x100; -pub const FILE_ATTRIBUTE_VIRTUAL = 0x10000; - -pub const FILE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1ff; -pub const FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE; -pub const FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE; -pub const FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE; - -// Flags for NtCreateNamedPipeFile -// NamedPipeType -pub const FILE_PIPE_BYTE_STREAM_TYPE = 0x0; -pub const FILE_PIPE_MESSAGE_TYPE = 0x1; -pub const FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x0; -pub const FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x2; -pub const FILE_PIPE_TYPE_VALID_MASK = 0x3; -// CompletionMode -pub const FILE_PIPE_QUEUE_OPERATION = 0x0; -pub const FILE_PIPE_COMPLETE_OPERATION = 0x1; -// ReadMode -pub const FILE_PIPE_BYTE_STREAM_MODE = 0x0; -pub const FILE_PIPE_MESSAGE_MODE = 0x1; - // flags for CreateEvent pub const CREATE_EVENT_INITIAL_SET = 0x00000002; pub 
const CREATE_EVENT_MANUAL_RESET = 0x00000001; -pub const EVENT_ALL_ACCESS = 0x1F0003; -pub const EVENT_MODIFY_STATE = 0x0002; - -// MEMORY_BASIC_INFORMATION.Type flags for VirtualQuery -pub const MEM_IMAGE = 0x1000000; -pub const MEM_MAPPED = 0x40000; -pub const MEM_PRIVATE = 0x20000; - pub const PROCESS_INFORMATION = extern struct { hProcess: HANDLE, hThread: HANDLE, @@ -3521,45 +5338,6 @@ pub const FILE_BEGIN = 0; pub const FILE_CURRENT = 1; pub const FILE_END = 2; -pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000; -pub const HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010; -pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004; -pub const HEAP_NO_SERIALIZE = 0x00000001; - -// AllocationType values -pub const MEM_COMMIT = 0x1000; -pub const MEM_RESERVE = 0x2000; -pub const MEM_FREE = 0x10000; -pub const MEM_RESET = 0x80000; -pub const MEM_RESET_UNDO = 0x1000000; -pub const MEM_LARGE_PAGES = 0x20000000; -pub const MEM_PHYSICAL = 0x400000; -pub const MEM_TOP_DOWN = 0x100000; -pub const MEM_WRITE_WATCH = 0x200000; -pub const MEM_RESERVE_PLACEHOLDER = 0x00040000; -pub const MEM_PRESERVE_PLACEHOLDER = 0x00000400; - -// Protect values -pub const PAGE_EXECUTE = 0x10; -pub const PAGE_EXECUTE_READ = 0x20; -pub const PAGE_EXECUTE_READWRITE = 0x40; -pub const PAGE_EXECUTE_WRITECOPY = 0x80; -pub const PAGE_NOACCESS = 0x01; -pub const PAGE_READONLY = 0x02; -pub const PAGE_READWRITE = 0x04; -pub const PAGE_WRITECOPY = 0x08; -pub const PAGE_TARGETS_INVALID = 0x40000000; -pub const PAGE_TARGETS_NO_UPDATE = 0x40000000; // Same as PAGE_TARGETS_INVALID -pub const PAGE_GUARD = 0x100; -pub const PAGE_NOCACHE = 0x200; -pub const PAGE_WRITECOMBINE = 0x400; - -// FreeType values -pub const MEM_COALESCE_PLACEHOLDERS = 0x1; -pub const MEM_RESERVE_PLACEHOLDERS = 0x2; -pub const MEM_DECOMMIT = 0x4000; -pub const MEM_RELEASE = 0x8000; - pub const PTHREAD_START_ROUTINE = *const fn (LPVOID) callconv(.winapi) DWORD; pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE; @@ -3743,38 +5521,8 @@ pub const PIMAGE_TLS_CALLBACK = ?*const fn (PVOID, DWORD, PVOID) callconv(.winap pub const PROV_RSA_FULL = 1; pub const REGSAM = ACCESS_MASK; -pub const ACCESS_MASK = DWORD; pub const LSTATUS = LONG; -pub const SECTION_INHERIT = enum(c_int) { - ViewShare = 0, - ViewUnmap = 1, -}; - -pub const SECTION_QUERY = 0x0001; -pub const SECTION_MAP_WRITE = 0x0002; -pub const SECTION_MAP_READ = 0x0004; -pub const SECTION_MAP_EXECUTE = 0x0008; -pub const SECTION_EXTEND_SIZE = 0x0010; -pub const SECTION_ALL_ACCESS = - STANDARD_RIGHTS_REQUIRED | - SECTION_QUERY | - SECTION_MAP_WRITE | - SECTION_MAP_READ | - SECTION_MAP_EXECUTE | - SECTION_EXTEND_SIZE; - -pub const SEC_64K_PAGES = 0x80000; -pub const SEC_FILE = 0x800000; -pub const SEC_IMAGE = 0x1000000; -pub const SEC_PROTECTED_IMAGE = 0x2000000; -pub const SEC_RESERVE = 0x4000000; -pub const SEC_COMMIT = 0x8000000; -pub const SEC_IMAGE_NO_EXECUTE = SEC_IMAGE | SEC_NOCACHE; -pub const SEC_NOCACHE = 0x10000000; -pub const SEC_WRITECOMBINE = 0x40000000; -pub const SEC_LARGE_PAGES = 0x80000000; - pub const HKEY = *opaque {}; pub const HKEY_CLASSES_ROOT: HKEY = @ptrFromInt(0x80000000); @@ -3788,34 +5536,6 @@ pub const HKEY_CURRENT_CONFIG: HKEY = @ptrFromInt(0x80000005); pub const HKEY_DYN_DATA: HKEY = @ptrFromInt(0x80000006); pub const HKEY_CURRENT_USER_LOCAL_SETTINGS: HKEY = @ptrFromInt(0x80000007); -/// Combines the STANDARD_RIGHTS_REQUIRED, KEY_QUERY_VALUE, KEY_SET_VALUE, KEY_CREATE_SUB_KEY, -/// KEY_ENUMERATE_SUB_KEYS, KEY_NOTIFY, and KEY_CREATE_LINK access rights. 
-pub const KEY_ALL_ACCESS = 0xF003F; -/// Reserved for system use. -pub const KEY_CREATE_LINK = 0x0020; -/// Required to create a subkey of a registry key. -pub const KEY_CREATE_SUB_KEY = 0x0004; -/// Required to enumerate the subkeys of a registry key. -pub const KEY_ENUMERATE_SUB_KEYS = 0x0008; -/// Equivalent to KEY_READ. -pub const KEY_EXECUTE = 0x20019; -/// Required to request change notifications for a registry key or for subkeys of a registry key. -pub const KEY_NOTIFY = 0x0010; -/// Required to query the values of a registry key. -pub const KEY_QUERY_VALUE = 0x0001; -/// Combines the STANDARD_RIGHTS_READ, KEY_QUERY_VALUE, KEY_ENUMERATE_SUB_KEYS, and KEY_NOTIFY values. -pub const KEY_READ = 0x20019; -/// Required to create, delete, or set a registry value. -pub const KEY_SET_VALUE = 0x0002; -/// Indicates that an application on 64-bit Windows should operate on the 32-bit registry view. -/// This flag is ignored by 32-bit Windows. -pub const KEY_WOW64_32KEY = 0x0200; -/// Indicates that an application on 64-bit Windows should operate on the 64-bit registry view. -/// This flag is ignored by 32-bit Windows. -pub const KEY_WOW64_64KEY = 0x0100; -/// Combines the STANDARD_RIGHTS_WRITE, KEY_SET_VALUE, and KEY_CREATE_SUB_KEY access rights. -pub const KEY_WRITE = 0x20006; - /// Open symbolic link. pub const REG_OPTION_OPEN_LINK: DWORD = 0x8; @@ -4466,14 +6186,14 @@ pub const EXCEPTION_DISPOSITION = i32; pub const EXCEPTION_ROUTINE = *const fn ( ExceptionRecord: ?*EXCEPTION_RECORD, EstablisherFrame: PVOID, - ContextRecord: *(Self.CONTEXT), + ContextRecord: *CONTEXT, DispatcherContext: PVOID, ) callconv(.winapi) EXCEPTION_DISPOSITION; pub const UNWIND_HISTORY_TABLE_SIZE = 12; pub const UNWIND_HISTORY_TABLE_ENTRY = extern struct { ImageBase: ULONG64, - FunctionEntry: *Self.RUNTIME_FUNCTION, + FunctionEntry: *RUNTIME_FUNCTION, }; pub const UNWIND_HISTORY_TABLE = extern struct { @@ -4492,24 +6212,6 @@ pub const UNW_FLAG_EHANDLER = 0x1; pub const UNW_FLAG_UHANDLER = 0x2; pub const UNW_FLAG_CHAININFO = 0x4; -pub const OBJECT_ATTRIBUTES = extern struct { - Length: ULONG, - RootDirectory: ?HANDLE, - ObjectName: *UNICODE_STRING, - Attributes: ULONG, - SecurityDescriptor: ?*anyopaque, - SecurityQualityOfService: ?*anyopaque, -}; - -pub const OBJ_INHERIT = 0x00000002; -pub const OBJ_PERMANENT = 0x00000010; -pub const OBJ_EXCLUSIVE = 0x00000020; -pub const OBJ_CASE_INSENSITIVE = 0x00000040; -pub const OBJ_OPENIF = 0x00000080; -pub const OBJ_OPENLINK = 0x00000100; -pub const OBJ_KERNEL_HANDLE = 0x00000200; -pub const OBJ_VALID_ATTRIBUTES = 0x000003F2; - pub const UNICODE_STRING = extern struct { Length: c_ushort, MaximumLength: c_ushort, @@ -4617,7 +6319,7 @@ pub const PEB = extern struct { Ldr: *PEB_LDR_DATA, ProcessParameters: *RTL_USER_PROCESS_PARAMETERS, SubSystemData: PVOID, - ProcessHeap: HANDLE, + ProcessHeap: ?*HEAP, // Versions: 5.1+ FastPebLock: *RTL_CRITICAL_SECTION, @@ -4862,7 +6564,7 @@ pub const FILE_DIRECTORY_INFORMATION = extern struct { ChangeTime: LARGE_INTEGER, EndOfFile: LARGE_INTEGER, AllocationSize: LARGE_INTEGER, - FileAttributes: ULONG, + FileAttributes: FILE.ATTRIBUTE, FileNameLength: ULONG, FileName: [1]WCHAR, }; @@ -4876,7 +6578,7 @@ pub const FILE_BOTH_DIR_INFORMATION = extern struct { ChangeTime: LARGE_INTEGER, EndOfFile: LARGE_INTEGER, AllocationSize: LARGE_INTEGER, - FileAttributes: ULONG, + FileAttributes: FILE.ATTRIBUTE, FileNameLength: ULONG, EaSize: ULONG, ShortNameLength: CHAR, @@ -4905,7 +6607,7 @@ pub fn FileInformationIterator(comptime FileInformationType: type) 
type { }; } -pub const IO_APC_ROUTINE = *const fn (PVOID, *IO_STATUS_BLOCK, ULONG) callconv(.winapi) void; +pub const IO_APC_ROUTINE = fn (?*anyopaque, *IO_STATUS_BLOCK, ULONG) callconv(.winapi) void; pub const CURDIR = extern struct { DosPath: UNICODE_STRING, @@ -4974,7 +6676,7 @@ pub const GetProcessMemoryInfoError = error{ pub fn GetProcessMemoryInfo(hProcess: HANDLE) GetProcessMemoryInfoError!VM_COUNTERS { var vmc: VM_COUNTERS = undefined; - const rc = ntdll.NtQueryInformationProcess(hProcess, .ProcessVmCounters, &vmc, @sizeOf(VM_COUNTERS), null); + const rc = ntdll.NtQueryInformationProcess(hProcess, .VmCounters, &vmc, @sizeOf(VM_COUNTERS), null); switch (rc) { .SUCCESS => return vmc, .ACCESS_DENIED => return error.AccessDenied, @@ -5029,7 +6731,7 @@ pub const OSVERSIONINFOW = extern struct { pub const RTL_OSVERSIONINFOW = OSVERSIONINFOW; pub const REPARSE_DATA_BUFFER = extern struct { - ReparseTag: ULONG, + ReparseTag: IO_REPARSE_TAG, ReparseDataLength: USHORT, Reserved: USHORT, DataBuffer: [1]UCHAR, @@ -5049,18 +6751,11 @@ pub const MOUNT_POINT_REPARSE_BUFFER = extern struct { PrintNameLength: USHORT, PathBuffer: [1]WCHAR, }; -pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: ULONG = 16 * 1024; -pub const FSCTL_SET_REPARSE_POINT: DWORD = 0x900a4; -pub const FSCTL_GET_REPARSE_POINT: DWORD = 0x900a8; -pub const IO_REPARSE_TAG_SYMLINK: ULONG = 0xa000000c; -pub const IO_REPARSE_TAG_MOUNT_POINT: ULONG = 0xa0000003; pub const SYMLINK_FLAG_RELATIVE: ULONG = 0x1; pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1; pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2; -pub const MOUNTMGRCONTROLTYPE = 0x0000006D; - pub const MOUNTMGR_MOUNT_POINT = extern struct { SymbolicLinkNameOffset: ULONG, SymbolicLinkNameLength: USHORT, @@ -5077,7 +6772,6 @@ pub const MOUNTMGR_MOUNT_POINTS = extern struct { NumberOfMountPoints: ULONG, MountPoints: [1]MOUNTMGR_MOUNT_POINT, }; -pub const IOCTL_MOUNTMGR_QUERY_POINTS = CTL_CODE(MOUNTMGRCONTROLTYPE, 2, .METHOD_BUFFERED, FILE_ANY_ACCESS); pub const MOUNTMGR_TARGET_NAME = extern struct { DeviceNameLength: USHORT, @@ -5087,7 +6781,6 @@ pub const MOUNTMGR_VOLUME_PATHS = extern struct { MultiSzLength: ULONG, MultiSz: [1]WCHAR, }; -pub const IOCTL_MOUNTMGR_QUERY_DOS_VOLUME_PATH = CTL_CODE(MOUNTMGRCONTROLTYPE, 12, .METHOD_BUFFERED, FILE_ANY_ACCESS); pub const OBJECT_INFORMATION_CLASS = enum(c_int) { ObjectBasicInformation = 0, @@ -5479,113 +7172,6 @@ pub const SYSTEM_BASIC_INFORMATION = extern struct { NumberOfProcessors: UCHAR, }; -pub const THREADINFOCLASS = enum(c_int) { - ThreadBasicInformation, - ThreadTimes, - ThreadPriority, - ThreadBasePriority, - ThreadAffinityMask, - ThreadImpersonationToken, - ThreadDescriptorTableEntry, - ThreadEnableAlignmentFaultFixup, - ThreadEventPair_Reusable, - ThreadQuerySetWin32StartAddress, - ThreadZeroTlsCell, - ThreadPerformanceCount, - ThreadAmILastThread, - ThreadIdealProcessor, - ThreadPriorityBoost, - ThreadSetTlsArrayAddress, - ThreadIsIoPending, - // Windows 2000+ from here - ThreadHideFromDebugger, - // Windows XP+ from here - ThreadBreakOnTermination, - ThreadSwitchLegacyState, - ThreadIsTerminated, - // Windows Vista+ from here - ThreadLastSystemCall, - ThreadIoPriority, - ThreadCycleTime, - ThreadPagePriority, - ThreadActualBasePriority, - ThreadTebInformation, - ThreadCSwitchMon, - // Windows 7+ from here - ThreadCSwitchPmu, - ThreadWow64Context, - ThreadGroupInformation, - ThreadUmsInformation, - ThreadCounterProfiling, - ThreadIdealProcessorEx, - // Windows 8+ from here - ThreadCpuAccountingInformation, 
- // Windows 8.1+ from here - ThreadSuspendCount, - // Windows 10+ from here - ThreadHeterogeneousCpuPolicy, - ThreadContainerId, - ThreadNameInformation, - ThreadSelectedCpuSets, - ThreadSystemThreadInformation, - ThreadActualGroupAffinity, -}; - -pub const PROCESSINFOCLASS = enum(c_int) { - ProcessBasicInformation, - ProcessQuotaLimits, - ProcessIoCounters, - ProcessVmCounters, - ProcessTimes, - ProcessBasePriority, - ProcessRaisePriority, - ProcessDebugPort, - ProcessExceptionPort, - ProcessAccessToken, - ProcessLdtInformation, - ProcessLdtSize, - ProcessDefaultHardErrorMode, - ProcessIoPortHandlers, - ProcessPooledUsageAndLimits, - ProcessWorkingSetWatch, - ProcessUserModeIOPL, - ProcessEnableAlignmentFaultFixup, - ProcessPriorityClass, - ProcessWx86Information, - ProcessHandleCount, - ProcessAffinityMask, - ProcessPriorityBoost, - ProcessDeviceMap, - ProcessSessionInformation, - ProcessForegroundInformation, - ProcessWow64Information, - ProcessImageFileName, - ProcessLUIDDeviceMapsEnabled, - ProcessBreakOnTermination, - ProcessDebugObjectHandle, - ProcessDebugFlags, - ProcessHandleTracing, - ProcessIoPriority, - ProcessExecuteFlags, - ProcessTlsInformation, - ProcessCookie, - ProcessImageInformation, - ProcessCycleTime, - ProcessPagePriority, - ProcessInstrumentationCallback, - ProcessThreadStackAllocation, - ProcessWorkingSetWatchEx, - ProcessImageFileNameWin32, - ProcessImageFileMapping, - ProcessAffinityUpdateMode, - ProcessMemoryAllocationMode, - ProcessGroupInformation, - ProcessTokenVirtualizationEnabled, - ProcessConsoleHostProcess, - ProcessWindowInformation, - MaxProcessInfoClass, -}; - pub const PROCESS_BASIC_INFORMATION = extern struct { ExitStatus: NTSTATUS, PebBaseAddress: *PEB, @@ -5641,7 +7227,7 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE { var nread: DWORD = 0; const rc = ntdll.NtQueryInformationProcess( handle, - .ProcessBasicInformation, + .BasicInformation, &info, @sizeOf(PROCESS_BASIC_INFORMATION), &nread, diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig index 7f746057a9..81cae8fac8 100644 --- a/lib/std/os/windows/kernel32.zig +++ b/lib/std/os/windows/kernel32.zig @@ -1,6 +1,7 @@ const std = @import("../../std.zig"); const windows = std.os.windows; +const ACCESS_MASK = windows.ACCESS_MASK; const BOOL = windows.BOOL; const CONDITION_VARIABLE = windows.CONDITION_VARIABLE; const CONSOLE_SCREEN_BUFFER_INFO = windows.CONSOLE_SCREEN_BUFFER_INFO; @@ -66,7 +67,7 @@ pub extern "kernel32" fn CancelIoEx( pub extern "kernel32" fn CreateFileW( lpFileName: LPCWSTR, - dwDesiredAccess: DWORD, + dwDesiredAccess: ACCESS_MASK, dwShareMode: DWORD, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES, dwCreationDisposition: DWORD, @@ -160,7 +161,7 @@ pub extern "kernel32" fn DuplicateHandle( hSourceHandle: HANDLE, hTargetProcessHandle: HANDLE, lpTargetHandle: *HANDLE, - dwDesiredAccess: DWORD, + dwDesiredAccess: ACCESS_MASK, bInheritHandle: BOOL, dwOptions: DWORD, ) callconv(.winapi) BOOL; @@ -308,9 +309,6 @@ pub extern "kernel32" fn CreateThread( lpThreadId: ?*DWORD, ) callconv(.winapi) ?HANDLE; -// TODO: Wrapper around RtlDelayExecution. -pub extern "kernel32" fn SwitchToThread() callconv(.winapi) BOOL; - // Locks, critical sections, initializers pub extern "kernel32" fn InitOnceExecuteOnce( @@ -401,34 +399,6 @@ pub extern "kernel32" fn ReadConsoleOutputCharacterW( lpNumberOfCharsRead: *DWORD, ) callconv(.winapi) BOOL; -// Memory Mapping/Allocation - -// TODO: Wrapper around RtlCreateHeap. 
-pub extern "kernel32" fn HeapCreate( - flOptions: DWORD, - dwInitialSize: SIZE_T, - dwMaximumSize: SIZE_T, -) callconv(.winapi) ?HANDLE; - -// TODO: Fowrarder to RtlFreeHeap before win11_zn. -// Since win11_zn this function points to unexported symbol RtlFreeHeapFast. -// See https://github.com/ziglang/zig/pull/25766#discussion_r2479727640 -pub extern "kernel32" fn HeapFree( - hHeap: HANDLE, - dwFlags: DWORD, - lpMem: LPVOID, -) callconv(.winapi) BOOL; - -// TODO: Wrapper around RtlValidateHeap (BOOLEAN -> BOOL) -pub extern "kernel32" fn HeapValidate( - hHeap: HANDLE, - dwFlags: DWORD, - lpMem: ?*const anyopaque, -) callconv(.winapi) BOOL; - -// TODO: Getter for peb.ProcessHeap -pub extern "kernel32" fn GetProcessHeap() callconv(.winapi) ?HANDLE; - // Code Libraries/Modules // TODO: Wrapper around LdrGetDllFullName. diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig index d323fbd5b5..f41284dda6 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -1,277 +1,279 @@ const std = @import("../../std.zig"); const windows = std.os.windows; +const ACCESS_MASK = windows.ACCESS_MASK; const BOOL = windows.BOOL; +const BOOLEAN = windows.BOOLEAN; +const CONDITION_VARIABLE = windows.CONDITION_VARIABLE; +const CONTEXT = windows.CONTEXT; +const CRITICAL_SECTION = windows.CRITICAL_SECTION; +const CTL_CODE = windows.CTL_CODE; +const CURDIR = windows.CURDIR; const DWORD = windows.DWORD; const DWORD64 = windows.DWORD64; -const ULONG = windows.ULONG; -const ULONG_PTR = windows.ULONG_PTR; -const NTSTATUS = windows.NTSTATUS; -const WORD = windows.WORD; +const ERESOURCE = windows.ERESOURCE; +const EVENT_TYPE = windows.EVENT_TYPE; +const EXCEPTION_ROUTINE = windows.EXCEPTION_ROUTINE; +const FILE = windows.FILE; +const FS_INFORMATION_CLASS = windows.FS_INFORMATION_CLASS; const HANDLE = windows.HANDLE; -const ACCESS_MASK = windows.ACCESS_MASK; +const HEAP = windows.HEAP; const IO_APC_ROUTINE = windows.IO_APC_ROUTINE; -const BOOLEAN = windows.BOOLEAN; -const OBJECT_ATTRIBUTES = windows.OBJECT_ATTRIBUTES; -const PVOID = windows.PVOID; const IO_STATUS_BLOCK = windows.IO_STATUS_BLOCK; +const KNONVOLATILE_CONTEXT_POINTERS = windows.KNONVOLATILE_CONTEXT_POINTERS; const LARGE_INTEGER = windows.LARGE_INTEGER; +const LOGICAL = windows.LOGICAL; +const LONG = windows.LONG; +const LPCVOID = windows.LPCVOID; +const LPVOID = windows.LPVOID; +const MEM = windows.MEM; +const NTSTATUS = windows.NTSTATUS; +const OBJECT_ATTRIBUTES = windows.OBJECT_ATTRIBUTES; const OBJECT_INFORMATION_CLASS = windows.OBJECT_INFORMATION_CLASS; -const FILE_INFORMATION_CLASS = windows.FILE_INFORMATION_CLASS; -const FS_INFORMATION_CLASS = windows.FS_INFORMATION_CLASS; -const UNICODE_STRING = windows.UNICODE_STRING; -const RTL_OSVERSIONINFOW = windows.RTL_OSVERSIONINFOW; -const FILE_BASIC_INFORMATION = windows.FILE_BASIC_INFORMATION; -const SIZE_T = windows.SIZE_T; -const CURDIR = windows.CURDIR; +const PAGE = windows.PAGE; const PCWSTR = windows.PCWSTR; +const PROCESSINFOCLASS = windows.PROCESSINFOCLASS; +const PVOID = windows.PVOID; +const RTL_OSVERSIONINFOW = windows.RTL_OSVERSIONINFOW; const RTL_QUERY_REGISTRY_TABLE = windows.RTL_QUERY_REGISTRY_TABLE; -const CONTEXT = windows.CONTEXT; -const UNWIND_HISTORY_TABLE = windows.UNWIND_HISTORY_TABLE; const RUNTIME_FUNCTION = windows.RUNTIME_FUNCTION; -const KNONVOLATILE_CONTEXT_POINTERS = windows.KNONVOLATILE_CONTEXT_POINTERS; -const EXCEPTION_ROUTINE = windows.EXCEPTION_ROUTINE; +const SEC = windows.SEC; +const SECTION_INHERIT = windows.SECTION_INHERIT; +const 
SIZE_T = windows.SIZE_T; +const SRWLOCK = windows.SRWLOCK; const SYSTEM_INFORMATION_CLASS = windows.SYSTEM_INFORMATION_CLASS; const THREADINFOCLASS = windows.THREADINFOCLASS; -const PROCESSINFOCLASS = windows.PROCESSINFOCLASS; -const LPVOID = windows.LPVOID; -const LPCVOID = windows.LPCVOID; -const SECTION_INHERIT = windows.SECTION_INHERIT; +const ULONG = windows.ULONG; +const ULONG_PTR = windows.ULONG_PTR; +const UNICODE_STRING = windows.UNICODE_STRING; +const UNWIND_HISTORY_TABLE = windows.UNWIND_HISTORY_TABLE; +const USHORT = windows.USHORT; const VECTORED_EXCEPTION_HANDLER = windows.VECTORED_EXCEPTION_HANDLER; -const CRITICAL_SECTION = windows.CRITICAL_SECTION; -const SRWLOCK = windows.SRWLOCK; -const CONDITION_VARIABLE = windows.CONDITION_VARIABLE; +const WORD = windows.WORD; -pub extern "ntdll" fn NtQueryInformationProcess( - ProcessHandle: HANDLE, - ProcessInformationClass: PROCESSINFOCLASS, - ProcessInformation: *anyopaque, - ProcessInformationLength: ULONG, - ReturnLength: ?*ULONG, -) callconv(.winapi) NTSTATUS; +// ref: km/ntifs.h -pub extern "ntdll" fn NtQueryInformationThread( - ThreadHandle: HANDLE, - ThreadInformationClass: THREADINFOCLASS, - ThreadInformation: *anyopaque, - ThreadInformationLength: ULONG, - ReturnLength: ?*ULONG, -) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn RtlCreateHeap( + Flags: HEAP.FLAGS.CREATE, + HeapBase: ?PVOID, + ReserveSize: SIZE_T, + CommitSize: SIZE_T, + Lock: ?*ERESOURCE, + Parameters: ?*const HEAP.RTL_PARAMETERS, +) callconv(.winapi) ?*HEAP; -pub extern "ntdll" fn NtQuerySystemInformation( - SystemInformationClass: SYSTEM_INFORMATION_CLASS, - SystemInformation: PVOID, - SystemInformationLength: ULONG, - ReturnLength: ?*ULONG, -) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn RtlDestroyHeap( + HeapHandle: *HEAP, +) callconv(.winapi) ?*HEAP; -pub extern "ntdll" fn NtSetInformationThread( - ThreadHandle: HANDLE, - ThreadInformationClass: THREADINFOCLASS, - ThreadInformation: *const anyopaque, - ThreadInformationLength: ULONG, -) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn RtlAllocateHeap( + HeapHandle: *HEAP, + Flags: HEAP.FLAGS.ALLOCATION, + Size: SIZE_T, +) callconv(.winapi) ?PVOID; + +pub extern "ntdll" fn RtlFreeHeap( + HeapHandle: *HEAP, + Flags: HEAP.FLAGS.ALLOCATION, + BaseAddress: ?PVOID, +) callconv(.winapi) LOGICAL; -pub extern "ntdll" fn RtlGetVersion( - lpVersionInformation: *RTL_OSVERSIONINFOW, -) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn RtlCaptureStackBackTrace( - FramesToSkip: DWORD, - FramesToCapture: DWORD, + FramesToSkip: ULONG, + FramesToCapture: ULONG, BackTrace: **anyopaque, - BackTraceHash: ?*DWORD, -) callconv(.winapi) WORD; -pub extern "ntdll" fn RtlCaptureContext(ContextRecord: *CONTEXT) callconv(.winapi) void; -pub extern "ntdll" fn RtlLookupFunctionEntry( - ControlPc: DWORD64, - ImageBase: *DWORD64, - HistoryTable: *UNWIND_HISTORY_TABLE, -) callconv(.winapi) ?*RUNTIME_FUNCTION; -pub extern "ntdll" fn RtlVirtualUnwind( - HandlerType: DWORD, - ImageBase: DWORD64, - ControlPc: DWORD64, - FunctionEntry: *RUNTIME_FUNCTION, - ContextRecord: *CONTEXT, - HandlerData: *?PVOID, - EstablisherFrame: *DWORD64, - ContextPointers: ?*KNONVOLATILE_CONTEXT_POINTERS, -) callconv(.winapi) *EXCEPTION_ROUTINE; -pub extern "ntdll" fn RtlGetSystemTimePrecise() callconv(.winapi) LARGE_INTEGER; -pub extern "ntdll" fn NtQueryInformationFile( - FileHandle: HANDLE, - IoStatusBlock: *IO_STATUS_BLOCK, - FileInformation: *anyopaque, - Length: ULONG, - FileInformationClass: FILE_INFORMATION_CLASS, -) callconv(.winapi) 
NTSTATUS; -pub extern "ntdll" fn NtSetInformationFile( - FileHandle: HANDLE, - IoStatusBlock: *IO_STATUS_BLOCK, - FileInformation: PVOID, - Length: ULONG, - FileInformationClass: FILE_INFORMATION_CLASS, -) callconv(.winapi) NTSTATUS; + BackTraceHash: ?*ULONG, +) callconv(.winapi) USHORT; -pub extern "ntdll" fn NtQueryAttributesFile( - ObjectAttributes: *OBJECT_ATTRIBUTES, - FileAttributes: *FILE_BASIC_INFORMATION, -) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn RtlCaptureContext( + ContextRecord: *CONTEXT, +) callconv(.winapi) void; -pub extern "ntdll" fn RtlQueryPerformanceCounter(PerformanceCounter: *LARGE_INTEGER) callconv(.winapi) BOOL; -pub extern "ntdll" fn RtlQueryPerformanceFrequency(PerformanceFrequency: *LARGE_INTEGER) callconv(.winapi) BOOL; -pub extern "ntdll" fn NtQueryPerformanceCounter( - PerformanceCounter: *LARGE_INTEGER, - PerformanceFrequency: ?*LARGE_INTEGER, +pub extern "ntdll" fn NtSetInformationThread( + ThreadHandle: HANDLE, + ThreadInformationClass: THREADINFOCLASS, + ThreadInformation: *const anyopaque, + ThreadInformationLength: ULONG, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtCreateFile( FileHandle: *HANDLE, DesiredAccess: ACCESS_MASK, - ObjectAttributes: *OBJECT_ATTRIBUTES, + ObjectAttributes: *const OBJECT_ATTRIBUTES, IoStatusBlock: *IO_STATUS_BLOCK, - AllocationSize: ?*LARGE_INTEGER, - FileAttributes: ULONG, - ShareAccess: ULONG, - CreateDisposition: ULONG, - CreateOptions: ULONG, + AllocationSize: ?*const LARGE_INTEGER, + FileAttributes: FILE.ATTRIBUTE, + ShareAccess: FILE.SHARE, + CreateDisposition: FILE.CREATE_DISPOSITION, + CreateOptions: FILE.MODE, EaBuffer: ?*anyopaque, EaLength: ULONG, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtCreateSection( - SectionHandle: *HANDLE, - DesiredAccess: ACCESS_MASK, - ObjectAttributes: ?*OBJECT_ATTRIBUTES, - MaximumSize: ?*LARGE_INTEGER, - SectionPageProtection: ULONG, - AllocationAttributes: ULONG, - FileHandle: ?HANDLE, -) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtMapViewOfSection( - SectionHandle: HANDLE, - ProcessHandle: HANDLE, - BaseAddress: *PVOID, - ZeroBits: ?*ULONG, - CommitSize: SIZE_T, - SectionOffset: ?*LARGE_INTEGER, - ViewSize: *SIZE_T, - InheritDispostion: SECTION_INHERIT, - AllocationType: ULONG, - Win32Protect: ULONG, -) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtUnmapViewOfSection( - ProcessHandle: HANDLE, - BaseAddress: PVOID, -) callconv(.winapi) NTSTATUS; + pub extern "ntdll" fn NtDeviceIoControlFile( FileHandle: HANDLE, Event: ?HANDLE, - ApcRoutine: ?IO_APC_ROUTINE, + ApcRoutine: ?*const IO_APC_ROUTINE, ApcContext: ?*anyopaque, IoStatusBlock: *IO_STATUS_BLOCK, - IoControlCode: ULONG, + IoControlCode: CTL_CODE, InputBuffer: ?*const anyopaque, InputBufferLength: ULONG, OutputBuffer: ?PVOID, OutputBufferLength: ULONG, ) callconv(.winapi) NTSTATUS; + pub extern "ntdll" fn NtFsControlFile( FileHandle: HANDLE, Event: ?HANDLE, - ApcRoutine: ?IO_APC_ROUTINE, + ApcRoutine: ?*const IO_APC_ROUTINE, ApcContext: ?*anyopaque, IoStatusBlock: *IO_STATUS_BLOCK, - FsControlCode: ULONG, + FsControlCode: CTL_CODE, InputBuffer: ?*const anyopaque, InputBufferLength: ULONG, OutputBuffer: ?PVOID, OutputBufferLength: ULONG, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtClose(Handle: HANDLE) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlDosPathNameToNtPathName_U( - DosPathName: [*:0]const u16, - NtPathName: *UNICODE_STRING, - NtFileNamePart: ?*?[*:0]const u16, - DirectoryInfo: ?*CURDIR, -) callconv(.winapi) BOOL; -pub extern "ntdll" fn 
RtlFreeUnicodeString(UnicodeString: *UNICODE_STRING) callconv(.winapi) void; -/// Returns the number of bytes written to `Buffer`. -/// If the returned count is larger than `BufferByteLength`, the buffer was too small. -/// If the returned count is zero, an error occurred. -pub extern "ntdll" fn RtlGetFullPathName_U( - FileName: [*:0]const u16, - BufferByteLength: ULONG, - Buffer: [*]u16, - ShortName: ?*[*:0]const u16, -) callconv(.winapi) windows.ULONG; +pub extern "ntdll" fn NtLockFile( + FileHandle: HANDLE, + Event: ?HANDLE, + ApcRoutine: ?*const IO_APC_ROUTINE, + ApcContext: ?*anyopaque, + IoStatusBlock: *IO_STATUS_BLOCK, + ByteOffset: *const LARGE_INTEGER, + Length: *const LARGE_INTEGER, + Key: ?*const ULONG, + FailImmediately: BOOLEAN, + ExclusiveLock: BOOLEAN, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtOpenFile( + FileHandle: *HANDLE, + DesiredAccess: ACCESS_MASK, + ObjectAttributes: *const OBJECT_ATTRIBUTES, + IoStatusBlock: *IO_STATUS_BLOCK, + ShareAccess: FILE.SHARE, + OpenOptions: FILE.MODE, +) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtQueryDirectoryFile( FileHandle: HANDLE, Event: ?HANDLE, - ApcRoutine: ?IO_APC_ROUTINE, + ApcRoutine: ?*const IO_APC_ROUTINE, ApcContext: ?*anyopaque, IoStatusBlock: *IO_STATUS_BLOCK, FileInformation: *anyopaque, Length: ULONG, - FileInformationClass: FILE_INFORMATION_CLASS, + FileInformationClass: FILE.INFORMATION_CLASS, ReturnSingleEntry: BOOLEAN, - FileName: ?*UNICODE_STRING, + FileName: ?*const UNICODE_STRING, RestartScan: BOOLEAN, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtCreateKeyedEvent( - KeyedEventHandle: *HANDLE, - DesiredAccess: ACCESS_MASK, - ObjectAttributes: ?PVOID, - Flags: ULONG, +pub extern "ntdll" fn NtQueryInformationFile( + FileHandle: HANDLE, + IoStatusBlock: *IO_STATUS_BLOCK, + FileInformation: *anyopaque, + Length: ULONG, + FileInformationClass: FILE.INFORMATION_CLASS, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtReleaseKeyedEvent( - EventHandle: ?HANDLE, - Key: ?*const anyopaque, - Alertable: BOOLEAN, - Timeout: ?*const LARGE_INTEGER, +pub extern "ntdll" fn NtQueryVolumeInformationFile( + FileHandle: HANDLE, + IoStatusBlock: *IO_STATUS_BLOCK, + FsInformation: *anyopaque, + Length: ULONG, + FsInformationClass: FS_INFORMATION_CLASS, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtWaitForKeyedEvent( - EventHandle: ?HANDLE, - Key: ?*const anyopaque, - Alertable: BOOLEAN, - Timeout: ?*const LARGE_INTEGER, +pub extern "ntdll" fn NtReadFile( + FileHandle: HANDLE, + Event: ?HANDLE, + ApcRoutine: ?*const IO_APC_ROUTINE, + ApcContext: ?*anyopaque, + IoStatusBlock: *IO_STATUS_BLOCK, + Buffer: *anyopaque, + Length: ULONG, + ByteOffset: ?*const LARGE_INTEGER, + Key: ?*const ULONG, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtSetInformationFile( + FileHandle: HANDLE, + IoStatusBlock: *IO_STATUS_BLOCK, + FileInformation: *const anyopaque, + Length: ULONG, + FileInformationClass: FILE.INFORMATION_CLASS, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtWriteFile( + FileHandle: HANDLE, + Event: ?HANDLE, + ApcRoutine: ?*const IO_APC_ROUTINE, + ApcContext: ?*anyopaque, + IoStatusBlock: *IO_STATUS_BLOCK, + Buffer: *const anyopaque, + Length: ULONG, + ByteOffset: ?*const LARGE_INTEGER, + Key: ?*const ULONG, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlSetCurrentDirectory_U(PathName: *UNICODE_STRING) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtUnlockFile( + FileHandle: HANDLE, + IoStatusBlock: *IO_STATUS_BLOCK, + ByteOffset: *const LARGE_INTEGER, + 
Length: *const LARGE_INTEGER, + Key: ULONG, +) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtQueryObject( Handle: HANDLE, ObjectInformationClass: OBJECT_INFORMATION_CLASS, - ObjectInformation: PVOID, + ObjectInformation: ?PVOID, ObjectInformationLength: ULONG, ReturnLength: ?*ULONG, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtQueryVolumeInformationFile( - FileHandle: HANDLE, - IoStatusBlock: *IO_STATUS_BLOCK, - FsInformation: *anyopaque, - Length: ULONG, - FsInformationClass: FS_INFORMATION_CLASS, +pub extern "ntdll" fn NtClose( + Handle: HANDLE, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlWakeAddressAll( - Address: ?*const anyopaque, -) callconv(.winapi) void; +pub extern "ntdll" fn NtCreateSection( + SectionHandle: *HANDLE, + DesiredAccess: ACCESS_MASK, + ObjectAttributes: ?*const OBJECT_ATTRIBUTES, + MaximumSize: ?*const LARGE_INTEGER, + SectionPageProtection: PAGE, + AllocationAttributes: SEC, + FileHandle: ?HANDLE, +) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlWakeAddressSingle( - Address: ?*const anyopaque, -) callconv(.winapi) void; +pub extern "ntdll" fn NtAllocateVirtualMemory( + ProcessHandle: HANDLE, + BaseAddress: *PVOID, + ZeroBits: ULONG_PTR, + RegionSize: *SIZE_T, + AllocationType: MEM.ALLOCATE, + Protect: PAGE, +) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlWaitOnAddress( - Address: ?*const anyopaque, - CompareAddress: ?*const anyopaque, - AddressSize: SIZE_T, - Timeout: ?*const LARGE_INTEGER, +pub extern "ntdll" fn NtFreeVirtualMemory( + ProcessHandle: HANDLE, + BaseAddress: *PVOID, + RegionSize: *SIZE_T, + FreeType: MEM.FREE, +) callconv(.winapi) NTSTATUS; + +// ref: km/wdm.h + +pub extern "ntdll" fn RtlQueryRegistryValues( + RelativeTo: ULONG, + Path: PCWSTR, + QueryTable: [*]RTL_QUERY_REGISTRY_TABLE, + Context: ?*const anyopaque, + Environment: ?*const anyopaque, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn RtlEqualUnicodeString( @@ -284,39 +286,153 @@ pub extern "ntdll" fn RtlUpcaseUnicodeChar( SourceCharacter: u16, ) callconv(.winapi) u16; -pub extern "ntdll" fn NtLockFile( - FileHandle: HANDLE, - Event: ?HANDLE, - ApcRoutine: ?*IO_APC_ROUTINE, - ApcContext: ?*anyopaque, - IoStatusBlock: *IO_STATUS_BLOCK, - ByteOffset: *const LARGE_INTEGER, - Length: *const LARGE_INTEGER, - Key: ?*ULONG, - FailImmediately: BOOLEAN, - ExclusiveLock: BOOLEAN, +pub extern "ntdll" fn RtlFreeUnicodeString( + UnicodeString: *UNICODE_STRING, +) callconv(.winapi) void; + +pub extern "ntdll" fn RtlGetVersion( + lpVersionInformation: *RTL_OSVERSIONINFOW, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn NtUnlockFile( - FileHandle: HANDLE, +// ref: um/winnt.h + +pub extern "ntdll" fn RtlLookupFunctionEntry( + ControlPc: usize, + ImageBase: *usize, + HistoryTable: *UNWIND_HISTORY_TABLE, +) callconv(.winapi) ?*RUNTIME_FUNCTION; + +pub extern "ntdll" fn RtlVirtualUnwind( + HandlerType: DWORD, + ImageBase: usize, + ControlPc: usize, + FunctionEntry: *RUNTIME_FUNCTION, + ContextRecord: *CONTEXT, + HandlerData: *?PVOID, + EstablisherFrame: *usize, + ContextPointers: ?*KNONVOLATILE_CONTEXT_POINTERS, +) callconv(.winapi) *EXCEPTION_ROUTINE; + +// ref: um/winternl.h + +pub extern "ntdll" fn NtWaitForSingleObject( + Handle: HANDLE, + Alertable: BOOLEAN, + Timeout: ?*const LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtQueryInformationProcess( + ProcessHandle: HANDLE, + ProcessInformationClass: PROCESSINFOCLASS, + ProcessInformation: *anyopaque, + ProcessInformationLength: ULONG, + ReturnLength: ?*ULONG, +) 
callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtQueryInformationThread( + ThreadHandle: HANDLE, + ThreadInformationClass: THREADINFOCLASS, + ThreadInformation: *anyopaque, + ThreadInformationLength: ULONG, + ReturnLength: ?*ULONG, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtQuerySystemInformation( + SystemInformationClass: SYSTEM_INFORMATION_CLASS, + SystemInformation: PVOID, + SystemInformationLength: ULONG, + ReturnLength: ?*ULONG, +) callconv(.winapi) NTSTATUS; + +// ref none + +pub extern "ntdll" fn NtQueryAttributesFile( + ObjectAttributes: *const OBJECT_ATTRIBUTES, + FileAttributes: *FILE.BASIC_INFORMATION, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtCreateEvent( + EventHandle: *HANDLE, + DesiredAccess: ACCESS_MASK, + ObjectAttributes: ?*const OBJECT_ATTRIBUTES, + EventType: EVENT_TYPE, + InitialState: BOOLEAN, +) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtSetEvent( + EventHandle: HANDLE, + PreviousState: ?*LONG, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtCreateKeyedEvent( + KeyedEventHandle: *HANDLE, + DesiredAccess: ACCESS_MASK, + ObjectAttributes: ?*const OBJECT_ATTRIBUTES, + Flags: ULONG, +) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtReleaseKeyedEvent( + EventHandle: ?HANDLE, + Key: ?*const anyopaque, + Alertable: BOOLEAN, + Timeout: ?*const LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtWaitForKeyedEvent( + EventHandle: ?HANDLE, + Key: ?*const anyopaque, + Alertable: BOOLEAN, + Timeout: ?*const LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtCreateNamedPipeFile( + FileHandle: *HANDLE, + DesiredAccess: ACCESS_MASK, + ObjectAttributes: *const OBJECT_ATTRIBUTES, IoStatusBlock: *IO_STATUS_BLOCK, - ByteOffset: *const LARGE_INTEGER, - Length: *const LARGE_INTEGER, - Key: ?*ULONG, + ShareAccess: FILE.SHARE, + CreateDisposition: FILE.CREATE_DISPOSITION, + CreateOptions: FILE.MODE, + NamedPipeType: FILE.PIPE.TYPE, + ReadMode: FILE.PIPE.READ_MODE, + CompletionMode: FILE.PIPE.COMPLETION_MODE, + MaximumInstances: ULONG, + InboundQuota: ULONG, + OutboundQuota: ULONG, + DefaultTimeout: ?*const LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn NtMapViewOfSection( + SectionHandle: HANDLE, + ProcessHandle: HANDLE, + BaseAddress: ?*PVOID, + ZeroBits: ?*const ULONG, + CommitSize: SIZE_T, + SectionOffset: ?*LARGE_INTEGER, + ViewSize: *SIZE_T, + InheritDispostion: SECTION_INHERIT, + AllocationType: MEM.MAP, + PageProtection: PAGE, +) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtUnmapViewOfSection( + ProcessHandle: HANDLE, + BaseAddress: PVOID, +) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtUnmapViewOfSectionEx( + ProcessHandle: HANDLE, + BaseAddress: PVOID, + UnmapFlags: MEM.UNMAP, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtOpenKey( KeyHandle: *HANDLE, DesiredAccess: ACCESS_MASK, - ObjectAttributes: OBJECT_ATTRIBUTES, + ObjectAttributes: *const OBJECT_ATTRIBUTES, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlQueryRegistryValues( - RelativeTo: ULONG, - Path: PCWSTR, - QueryTable: [*]RTL_QUERY_REGISTRY_TABLE, - Context: ?*anyopaque, - Environment: ?*anyopaque, +pub extern "ntdll" fn NtQueueApcThread( + ThreadHandle: HANDLE, + ApcRoutine: *const IO_APC_ROUTINE, + ApcArgument1: ?*anyopaque, + ApcArgument2: ?*anyopaque, + ApcArgument3: ?*anyopaque, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtReadVirtualMemory( @@ -326,7 +442,6 @@ pub extern "ntdll" fn NtReadVirtualMemory( NumberOfBytesToRead: SIZE_T, NumberOfBytesRead: 
?*SIZE_T, ) callconv(.winapi) NTSTATUS; - pub extern "ntdll" fn NtWriteVirtualMemory( ProcessHandle: HANDLE, BaseAddress: ?PVOID, @@ -334,51 +449,15 @@ pub extern "ntdll" fn NtWriteVirtualMemory( NumberOfBytesToWrite: SIZE_T, NumberOfBytesWritten: ?*SIZE_T, ) callconv(.winapi) NTSTATUS; - pub extern "ntdll" fn NtProtectVirtualMemory( ProcessHandle: HANDLE, BaseAddress: *?PVOID, NumberOfBytesToProtect: *SIZE_T, - NewAccessProtection: ULONG, - OldAccessProtection: *ULONG, + NewAccessProtection: PAGE, + OldAccessProtection: *PAGE, ) callconv(.winapi) NTSTATUS; -pub extern "ntdll" fn RtlExitUserProcess( - ExitStatus: u32, -) callconv(.winapi) noreturn; - -pub extern "ntdll" fn NtCreateNamedPipeFile( - FileHandle: *HANDLE, - DesiredAccess: ULONG, - ObjectAttributes: *OBJECT_ATTRIBUTES, - IoStatusBlock: *IO_STATUS_BLOCK, - ShareAccess: ULONG, - CreateDisposition: ULONG, - CreateOptions: ULONG, - NamedPipeType: ULONG, - ReadMode: ULONG, - CompletionMode: ULONG, - MaximumInstances: ULONG, - InboundQuota: ULONG, - OutboundQuota: ULONG, - DefaultTimeout: *LARGE_INTEGER, -) callconv(.winapi) NTSTATUS; - -pub extern "ntdll" fn NtAllocateVirtualMemory( - ProcessHandle: HANDLE, - BaseAddress: ?*PVOID, - ZeroBits: ULONG_PTR, - RegionSize: ?*SIZE_T, - AllocationType: ULONG, - PageProtection: ULONG, -) callconv(.winapi) NTSTATUS; - -pub extern "ntdll" fn NtFreeVirtualMemory( - ProcessHandle: HANDLE, - BaseAddress: ?*PVOID, - RegionSize: *SIZE_T, - FreeType: ULONG, -) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn NtYieldExecution() callconv(.winapi) NTSTATUS; pub extern "ntdll" fn RtlAddVectoredExceptionHandler( First: ULONG, @@ -388,6 +467,29 @@ pub extern "ntdll" fn RtlRemoveVectoredExceptionHandler( Handle: HANDLE, ) callconv(.winapi) ULONG; +pub extern "ntdll" fn RtlDosPathNameToNtPathName_U( + DosPathName: [*:0]const u16, + NtPathName: *UNICODE_STRING, + NtFileNamePart: ?*?[*:0]const u16, + DirectoryInfo: ?*CURDIR, +) callconv(.winapi) BOOL; + +pub extern "ntdll" fn RtlExitUserProcess( + ExitStatus: u32, +) callconv(.winapi) noreturn; + +/// Returns the number of bytes written to `Buffer`. +/// If the returned count is larger than `BufferByteLength`, the buffer was too small. +/// If the returned count is zero, an error occurred. 
+pub extern "ntdll" fn RtlGetFullPathName_U( + FileName: [*:0]const u16, + BufferByteLength: ULONG, + Buffer: [*]u16, + ShortName: ?*[*:0]const u16, +) callconv(.winapi) ULONG; + +pub extern "ntdll" fn RtlGetSystemTimePrecise() callconv(.winapi) LARGE_INTEGER; + pub extern "ntdll" fn RtlInitializeCriticalSection( lpCriticalSection: *CRITICAL_SECTION, ) callconv(.winapi) NTSTATUS; @@ -401,6 +503,28 @@ pub extern "ntdll" fn RtlDeleteCriticalSection( lpCriticalSection: *CRITICAL_SECTION, ) callconv(.winapi) NTSTATUS; +pub extern "ntdll" fn RtlQueryPerformanceCounter( + PerformanceCounter: *LARGE_INTEGER, +) callconv(.winapi) BOOL; +pub extern "ntdll" fn RtlQueryPerformanceFrequency( + PerformanceFrequency: *LARGE_INTEGER, +) callconv(.winapi) BOOL; +pub extern "ntdll" fn NtQueryPerformanceCounter( + PerformanceCounter: *LARGE_INTEGER, + PerformanceFrequency: ?*LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; + +pub extern "ntdll" fn RtlReAllocateHeap( + HeapHandle: *HEAP, + Flags: HEAP.FLAGS.ALLOCATION, + BaseAddress: ?PVOID, + Size: SIZE_T, +) callconv(.winapi) ?PVOID; + +pub extern "ntdll" fn RtlSetCurrentDirectory_U( + PathName: *UNICODE_STRING, +) callconv(.winapi) NTSTATUS; + pub extern "ntdll" fn RtlTryAcquireSRWLockExclusive( SRWLock: *SRWLOCK, ) callconv(.winapi) BOOLEAN; @@ -411,21 +535,22 @@ pub extern "ntdll" fn RtlReleaseSRWLockExclusive( SRWLock: *SRWLOCK, ) callconv(.winapi) void; +pub extern "ntdll" fn RtlWakeAddressAll( + Address: ?*const anyopaque, +) callconv(.winapi) void; +pub extern "ntdll" fn RtlWakeAddressSingle( + Address: ?*const anyopaque, +) callconv(.winapi) void; +pub extern "ntdll" fn RtlWaitOnAddress( + Address: ?*const anyopaque, + CompareAddress: ?*const anyopaque, + AddressSize: SIZE_T, + Timeout: ?*const LARGE_INTEGER, +) callconv(.winapi) NTSTATUS; + pub extern "ntdll" fn RtlWakeConditionVariable( ConditionVariable: *CONDITION_VARIABLE, ) callconv(.winapi) void; pub extern "ntdll" fn RtlWakeAllConditionVariable( ConditionVariable: *CONDITION_VARIABLE, ) callconv(.winapi) void; - -pub extern "ntdll" fn RtlReAllocateHeap( - HeapHandle: HANDLE, - Flags: ULONG, - BaseAddress: PVOID, - Size: SIZE_T, -) callconv(.winapi) ?PVOID; -pub extern "ntdll" fn RtlAllocateHeap( - HeapHandle: HANDLE, - Flags: ULONG, - Size: SIZE_T, -) callconv(.winapi) ?PVOID; diff --git a/lib/std/posix.zig b/lib/std/posix.zig index d92ac920f6..feeeeb9220 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -119,7 +119,18 @@ pub const STDIN_FILENO = system.STDIN_FILENO; pub const STDOUT_FILENO = system.STDOUT_FILENO; pub const SYS = system.SYS; pub const Sigaction = system.Sigaction; -pub const Stat = system.Stat; +pub const Stat = switch (native_os) { + // Has no concept of `stat`. + .windows => void, + // The `stat` bits/wrappers are removed due to having to maintain the + // different varying `struct stat`s per target and libc, leading to runtime + // errors. + // + // Users targeting linux should add a comptime check and use `statx`, + // similar to how `std.fs.File.stat` does. 
+ .linux => void, + else => system.Stat, +}; pub const T = system.T; pub const TCP = system.TCP; pub const VDSO = system.VDSO; @@ -480,15 +491,21 @@ fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtEr } defer close(pathfd); - const stat = fstatatZ(pathfd, "", AT.EMPTY_PATH) catch |err| switch (err) { + const path_mode = if (linux.wrapped.statx( + pathfd, + "", + AT.EMPTY_PATH, + .{ .TYPE = true }, + )) |stx| blk: { + assert(stx.mask.TYPE); + break :blk stx.mode; + } else |err| switch (err) { error.NameTooLong => unreachable, error.FileNotFound => unreachable, - error.Streaming => unreachable, - error.BadPathName => return error.Unexpected, - error.Canceled => return error.Canceled, else => |e| return e, }; - if ((stat.mode & S.IFMT) == S.IFLNK) + // Even though we only wanted TYPE, the kernel can still fill in the additional bits. + if ((path_mode & S.IFMT) == S.IFLNK) return error.OperationNotSupported; var procfs_buf: ["/proc/self/fd/-2147483648\x00".len]u8 = undefined; @@ -1041,18 +1058,16 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { if (native_os == .windows) { var io_status_block: windows.IO_STATUS_BLOCK = undefined; - var eof_info = windows.FILE_END_OF_FILE_INFORMATION{ + const eof_info: windows.FILE.END_OF_FILE_INFORMATION = .{ .EndOfFile = signed_len, }; - const rc = windows.ntdll.NtSetInformationFile( fd, &io_status_block, &eof_info, - @sizeOf(windows.FILE_END_OF_FILE_INFORMATION), - .FileEndOfFileInformation, + @sizeOf(windows.FILE.END_OF_FILE_INFORMATION), + .EndOfFile, ); - switch (rc) { .SUCCESS => return, .INVALID_HANDLE => unreachable, // Handle not open for writing @@ -2691,8 +2706,11 @@ pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(dir_path_w, .{ .dir = fs.cwd().fd, - .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, - .creation = windows.FILE_CREATE, + .access_mask = .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .READ = true }, + }, + .creation = .CREATE, .filter = .dir_only, }) catch |err| switch (err) { error.IsDir => return error.Unexpected, @@ -3842,13 +3860,9 @@ pub fn fstat(fd: fd_t) FStatError!Stat { if (native_os == .wasi and !builtin.link_libc) { return Stat.fromFilestat(try std.os.fstat_wasi(fd)); } - if (native_os == .windows) { - @compileError("fstat is not yet implemented on Windows"); - } - const fstat_sym = if (lfs64_abi) system.fstat64 else system.fstat; var stat = mem.zeroes(Stat); - switch (errno(fstat_sym(fd, &stat))) { + switch (errno(system.fstat(fd, &stat))) { .SUCCESS => return stat, .INVAL => unreachable, .BADF => unreachable, // Always a race condition. @@ -3888,9 +3902,8 @@ pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!S @compileError("use std.Io instead"); } - const fstatat_sym = if (lfs64_abi) system.fstatat64 else system.fstatat; var stat = mem.zeroes(Stat); - switch (errno(fstatat_sym(dirfd, pathname, &stat, flags))) { + switch (errno(system.fstatat(dirfd, pathname, &stat, flags))) { .SUCCESS => return stat, .INVAL => unreachable, .BADF => unreachable, // Always a race condition. 
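
The posix.zig hunk above makes `posix.Stat` resolve to `void` on Linux and Windows and directs Linux callers to a comptime check plus `statx`, the same pattern the `fchmodat2` change uses. As a rough illustration of that pattern (not part of this change), the sketch below mirrors the `linux.wrapped.statx` call, the field-mask struct, and the `posix.Stat != void` check that appear in these hunks; the helper name `fileMode` and the choice to request only the MODE bit are invented for the example.

const std = @import("std");
const builtin = @import("builtin");
const posix = std.posix;
const linux = std.os.linux;

fn fileMode(fd: posix.fd_t) !posix.mode_t {
    if (builtin.os.tag == .linux) {
        // Ask only for the MODE bit, as fchmodat2 above does for TYPE.
        const stx = try linux.wrapped.statx(fd, "", posix.AT.EMPTY_PATH, .{ .MODE = true });
        std.debug.assert(stx.mask.MODE);
        return stx.mode;
    } else if (posix.Stat != void) {
        // Targets that still provide a struct stat keep using fstat.
        const st = try posix.fstat(fd);
        return st.mode;
    } else {
        @compileError("no stat support on this target (e.g. Windows)");
    }
}

Because every condition is comptime-known, only the branch for the compilation target is analyzed, which is what lets `posix.Stat` be `void` on Linux and Windows without breaking callers that branch this way.
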
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index aaaaa1d948..3bb5e64c73 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -16,6 +16,7 @@ const AtomicRmwOp = std.builtin.AtomicRmwOp; const AtomicOrder = std.builtin.AtomicOrder; const native_os = builtin.target.os.tag; const tmpDir = std.testing.tmpDir; +const AT = posix.AT; // NOTE: several additional tests are in test/standalone/posix/. Any tests that mutate // process-wide POSIX state (cwd, signals, etc) cannot be Zig unit tests and should be over there. @@ -45,6 +46,7 @@ test "check WASI CWD" { test "open smoke test" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .windows) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; // TODO verify file attributes using `fstat` @@ -123,10 +125,24 @@ fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void { try expect(mem.eql(u8, target_path, given)); } -test "linkat with different directories" { - if ((builtin.cpu.arch == .riscv32 or builtin.cpu.arch.isLoongArch()) and builtin.os.tag == .linux and !builtin.link_libc) return error.SkipZigTest; // No `fstatat()`. - if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // `nstat.nlink` assertion is failing with LLVM 20+ for unclear reasons. +fn getLinkInfo(fd: posix.fd_t) !struct { posix.ino_t, posix.nlink_t } { + if (native_os == .linux) { + const stx = try linux.wrapped.statx( + fd, + "", + posix.AT.EMPTY_PATH, + .{ .INO = true, .NLINK = true }, + ); + std.debug.assert(stx.mask.INO); + std.debug.assert(stx.mask.NLINK); + return .{ stx.ino, stx.nlink }; + } + const st = try posix.fstat(fd); + return .{ st.ino, st.nlink }; +} + +test "linkat with different directories" { switch (native_os) { .wasi, .linux, .illumos => {}, else => return error.SkipZigTest, @@ -153,19 +169,49 @@ test "linkat with different directories" { defer nfd.close(); { - const estat = try posix.fstat(efd.handle); - const nstat = try posix.fstat(nfd.handle); - try testing.expectEqual(estat.ino, nstat.ino); - try testing.expectEqual(@as(@TypeOf(nstat.nlink), 2), nstat.nlink); + const eino, _ = try getLinkInfo(efd.handle); + const nino, const nlink = try getLinkInfo(nfd.handle); + try testing.expectEqual(eino, nino); + try testing.expectEqual(@as(posix.nlink_t, 2), nlink); } // Test 2: remove link try posix.unlinkat(subdir.fd, link_name, 0); + _, const elink = try getLinkInfo(efd.handle); + try testing.expectEqual(@as(posix.nlink_t, 1), elink); +} - { - const estat = try posix.fstat(efd.handle); - try testing.expectEqual(@as(@TypeOf(estat.nlink), 1), estat.nlink); - } +test "fstatat" { + if (posix.Stat == void) return error.SkipZigTest; + if (native_os == .wasi and !builtin.link_libc) return error.SkipZigTest; + + var tmp = tmpDir(.{}); + defer tmp.cleanup(); + + // create dummy file + const contents = "nonsense"; + try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = contents }); + + // fetch file's info on the opened fd directly + const file = try tmp.dir.openFile("file.txt", .{}); + const stat = try posix.fstat(file.handle); + defer file.close(); + + // now repeat but using `fstatat` instead + const statat = try posix.fstatat(tmp.dir.fd, "file.txt", posix.AT.SYMLINK_NOFOLLOW); + + try expectEqual(stat.dev, statat.dev); + try expectEqual(stat.ino, statat.ino); + try expectEqual(stat.nlink, statat.nlink); + try expectEqual(stat.mode, statat.mode); + try expectEqual(stat.uid, statat.uid); + try expectEqual(stat.gid, statat.gid); + try expectEqual(stat.rdev, statat.rdev); 
+ try expectEqual(stat.size, statat.size); + try expectEqual(stat.blksize, statat.blksize); + // The stat.blocks/statat.blocks count is managed by the filesystem and may + // change if the file is stored in a journal or "inline". + // try expectEqual(stat.blocks, statat.blocks); } test "readlinkat" { @@ -521,7 +567,7 @@ test "getrlimit and setrlimit" { } test "sigrtmin/max" { - if (native_os == .wasi or native_os == .windows or native_os.isDarwin()) { + if (native_os == .wasi or native_os == .windows or native_os.isDarwin() or native_os == .openbsd) { return error.SkipZigTest; } @@ -536,7 +582,7 @@ test "sigset empty/full" { var set: posix.sigset_t = posix.sigemptyset(); for (1..posix.NSIG) |i| { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; try expectEqual(false, posix.sigismember(&set, sig)); } @@ -553,7 +599,9 @@ fn reserved_signo(i: usize) bool { if (native_os.isDarwin()) return false; if (!builtin.link_libc) return false; const max = if (native_os == .netbsd) 32 else 31; - return i > max and i < posix.sigrtmin(); + if (i > max) return true; + if (native_os == .openbsd) return false; // no RT signals + return i < posix.sigrtmin(); } test "sigset add/del" { @@ -565,29 +613,29 @@ test "sigset add/del" { // See that none are set, then set each one, see that they're all set, then // remove them all, and then see that none are set. for (1..posix.NSIG) |i| { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; try expectEqual(false, posix.sigismember(&sigset, sig)); } for (1..posix.NSIG) |i| { if (!reserved_signo(i)) { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; posix.sigaddset(&sigset, sig); } } for (1..posix.NSIG) |i| { if (!reserved_signo(i)) { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; try expectEqual(true, posix.sigismember(&sigset, sig)); } } for (1..posix.NSIG) |i| { if (!reserved_signo(i)) { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; posix.sigdelset(&sigset, sig); } } for (1..posix.NSIG) |i| { - const sig = std.meta.intToEnum(posix.SIG, i) catch continue; + const sig = std.enums.fromInt(posix.SIG, i) orelse continue; try expectEqual(false, posix.sigismember(&sigset, sig)); } } @@ -718,6 +766,7 @@ test "POSIX file locking with fcntl" { test "rename smoke test" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .windows) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); @@ -786,6 +835,7 @@ test "rename smoke test" { test "access smoke test" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .windows) return error.SkipZigTest; + if (native_os == .openbsd) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); @@ -906,14 +956,31 @@ test "pwrite with empty buffer" { try expectEqual(rc, 0); } +fn getFileMode(dir: posix.fd_t, path: []const u8) !posix.mode_t { + const path_z = try posix.toPosixPath(path); + const mode: posix.mode_t = if (native_os == .linux) blk: { + const stx = try linux.wrapped.statx( + dir, + &path_z, + posix.AT.SYMLINK_NOFOLLOW, + .{ .MODE = true }, + ); + std.debug.assert(stx.mask.MODE); + break :blk stx.mode; + } else blk: { + const st = try 
posix.fstatatZ(dir, &path_z, posix.AT.SYMLINK_NOFOLLOW); + break :blk st.mode; + }; + + return mode & 0b111_111_111; +} + fn expectMode(dir: posix.fd_t, file: []const u8, mode: posix.mode_t) !void { - const st = try posix.fstatat(dir, file, posix.AT.SYMLINK_NOFOLLOW); - try expectEqual(mode, st.mode & 0b111_111_111); + const actual = try getFileMode(dir, file); + try expectEqual(mode, actual & 0b111_111_111); } test "fchmodat smoke test" { - if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23808 - if (!std.fs.has_executable_bit) return error.SkipZigTest; var tmp = tmpDir(.{}); @@ -928,13 +995,8 @@ test "fchmodat smoke test" { ); posix.close(fd); - if ((builtin.cpu.arch == .riscv32 or builtin.cpu.arch.isLoongArch()) and builtin.os.tag == .linux and !builtin.link_libc) return error.SkipZigTest; // No `fstatat()`. - try posix.symlinkat("regfile", tmp.dir.fd, "symlink"); - const sym_mode = blk: { - const st = try posix.fstatat(tmp.dir.fd, "symlink", posix.AT.SYMLINK_NOFOLLOW); - break :blk st.mode & 0b111_111_111; - }; + const sym_mode = try getFileMode(tmp.dir.fd, "symlink"); try posix.fchmodat(tmp.dir.fd, "regfile", 0o640, 0); try expectMode(tmp.dir.fd, "regfile", 0o640); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 30b0a841fc..5d4d65ec01 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -762,10 +762,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { const nul_handle = if (any_ignore) // "\Device\Null" or "\??\NUL" windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{ - .access_mask = windows.GENERIC_READ | windows.GENERIC_WRITE | windows.SYNCHRONIZE, - .share_access = windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE, + .access_mask = .{ + .STANDARD = .{ .SYNCHRONIZE = true }, + .GENERIC = .{ .WRITE = true, .READ = true }, + }, .sa = &saAttr, - .creation = windows.OPEN_EXISTING, + .creation = .OPEN, }) catch |err| switch (err) { error.PathAlreadyExists => return error.Unexpected, // not possible for "NUL" error.PipeBusy => return error.Unexpected, // not possible for "NUL" @@ -1174,7 +1176,7 @@ fn windowsCreateProcessPathExt( &io_status, &file_information_buf, file_information_buf.len, - .FileDirectoryInformation, + .Directory, windows.FALSE, // single result &app_name_unicode_string, windows.FALSE, // restart iteration @@ -1198,7 +1200,7 @@ fn windowsCreateProcessPathExt( var it = windows.FileInformationIterator(windows.FILE_DIRECTORY_INFORMATION){ .buf = &file_information_buf }; while (it.next()) |info| { // Skip directories - if (info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue; + if (info.FileAttributes.DIRECTORY) continue; const filename = @as([*]u16, @ptrCast(&info.FileName))[0 .. 
info.FileNameLength / 2]; // Because all results start with the app_name since we're using the wildcard `app_name*`, // if the length is equal to app_name then this is an exact match @@ -1415,11 +1417,11 @@ fn windowsMakeAsyncPipe(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *cons var sattr_copy = sattr.*; const write_handle = windows.kernel32.CreateFileW( pipe_path.ptr, - windows.GENERIC_WRITE, + .{ .GENERIC = .{ .WRITE = true } }, 0, &sattr_copy, windows.OPEN_EXISTING, - windows.FILE_ATTRIBUTE_NORMAL, + @bitCast(windows.FILE.ATTRIBUTE{ .NORMAL = true }), null, ); if (write_handle == windows.INVALID_HANDLE_VALUE) { diff --git a/lib/std/testing.zig b/lib/std/testing.zig index ee2aef36be..186cafad59 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -1160,7 +1160,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime } else |err| switch (err) { error.OutOfMemory => { if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) { - const tty_config = std.Io.tty.detectConfig(.stderr()); + const tty_config: std.Io.tty.Config = .detect(.stderr()); print( "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {f}", .{ diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 95c39ca5a1..b2eb2230bb 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -636,7 +636,6 @@ pub fn firstToken(tree: Ast, node: Node.Index) TokenIndex { .@"nosuspend", .asm_simple, .@"asm", - .asm_legacy, .array_type, .array_type_sentinel, .error_value, @@ -1050,11 +1049,6 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { n = @enumFromInt(tree.extra_data[@intFromEnum(members.end) - 1]); // last parameter } }, - .asm_legacy => { - _, const extra_index = tree.nodeData(n).node_and_extra; - const extra = tree.extraData(extra_index, Node.AsmLegacy); - return extra.rparen + end_offset; - }, .@"asm" => { _, const extra_index = tree.nodeData(n).node_and_extra; const extra = tree.extraData(extra_index, Node.Asm); @@ -1900,18 +1894,6 @@ pub fn asmSimple(tree: Ast, node: Node.Index) full.Asm { }); } -pub fn asmLegacy(tree: Ast, node: Node.Index) full.AsmLegacy { - const template, const extra_index = tree.nodeData(node).node_and_extra; - const extra = tree.extraData(extra_index, Node.AsmLegacy); - const items = tree.extraDataSlice(.{ .start = extra.items_start, .end = extra.items_end }, Node.Index); - return tree.legacyAsmComponents(.{ - .asm_token = tree.nodeMainToken(node), - .template = template, - .items = items, - .rparen = extra.rparen, - }); -} - pub fn asmFull(tree: Ast, node: Node.Index) full.Asm { const template, const extra_index = tree.nodeData(node).node_and_extra; const extra = tree.extraData(extra_index, Node.Asm); @@ -2217,67 +2199,6 @@ fn fullSwitchCaseComponents(tree: Ast, info: full.SwitchCase.Components, node: N return result; } -fn legacyAsmComponents(tree: Ast, info: full.AsmLegacy.Components) full.AsmLegacy { - var result: full.AsmLegacy = .{ - .ast = info, - .volatile_token = null, - .inputs = &.{}, - .outputs = &.{}, - .first_clobber = null, - }; - if (tree.tokenTag(info.asm_token + 1) == .keyword_volatile) { - result.volatile_token = info.asm_token + 1; - } - const outputs_end: usize = for (info.items, 0..) 
|item, i| { - switch (tree.nodeTag(item)) { - .asm_output => continue, - else => break i, - } - } else info.items.len; - - result.outputs = info.items[0..outputs_end]; - result.inputs = info.items[outputs_end..]; - - if (info.items.len == 0) { - // asm ("foo" ::: "a", "b"); - const template_token = tree.lastToken(info.template); - if (tree.tokenTag(template_token + 1) == .colon and - tree.tokenTag(template_token + 2) == .colon and - tree.tokenTag(template_token + 3) == .colon and - tree.tokenTag(template_token + 4) == .string_literal) - { - result.first_clobber = template_token + 4; - } - } else if (result.inputs.len != 0) { - // asm ("foo" :: [_] "" (y) : "a", "b"); - const last_input = result.inputs[result.inputs.len - 1]; - const rparen = tree.lastToken(last_input); - var i = rparen + 1; - // Allow a (useless) comma right after the closing parenthesis. - if (tree.tokenTag(i) == .comma) i = i + 1; - if (tree.tokenTag(i) == .colon and - tree.tokenTag(i + 1) == .string_literal) - { - result.first_clobber = i + 1; - } - } else { - // asm ("foo" : [_] "" (x) :: "a", "b"); - const last_output = result.outputs[result.outputs.len - 1]; - const rparen = tree.lastToken(last_output); - var i = rparen + 1; - // Allow a (useless) comma right after the closing parenthesis. - if (tree.tokenTag(i) == .comma) i = i + 1; - if (tree.tokenTag(i) == .colon and - tree.tokenTag(i + 1) == .colon and - tree.tokenTag(i + 2) == .string_literal) - { - result.first_clobber = i + 2; - } - } - - return result; -} - fn fullAsmComponents(tree: Ast, info: full.Asm.Components) full.Asm { var result: full.Asm = .{ .ast = info, @@ -2495,14 +2416,6 @@ pub fn fullAsm(tree: Ast, node: Node.Index) ?full.Asm { }; } -/// To be deleted after 0.15.0 is tagged -pub fn legacyAsm(tree: Ast, node: Node.Index) ?full.AsmLegacy { - return switch (tree.nodeTag(node)) { - .asm_legacy => tree.asmLegacy(node), - else => null, - }; -} - pub fn fullCall(tree: Ast, buffer: *[1]Ast.Node.Index, node: Node.Index) ?full.Call { return switch (tree.nodeTag(node)) { .call, .call_comma => tree.callFull(node), @@ -2897,21 +2810,6 @@ pub const full = struct { }; }; - pub const AsmLegacy = struct { - ast: Components, - volatile_token: ?TokenIndex, - first_clobber: ?TokenIndex, - outputs: []const Node.Index, - inputs: []const Node.Index, - - pub const Components = struct { - asm_token: TokenIndex, - template: Node.Index, - items: []const Node.Index, - rparen: TokenIndex, - }; - }; - pub const Call = struct { ast: Components, @@ -3908,14 +3806,6 @@ pub const Node = struct { /// /// The `main_token` field is the `asm` token. asm_simple, - /// `asm(lhs, a)`. - /// - /// The `data` field is a `.node_and_extra`: - /// 1. a `Node.Index` to lhs. - /// 2. a `ExtraIndex` to `AsmLegacy`. - /// - /// The `main_token` field is the `asm` token. - asm_legacy, /// `asm(a, b)`. /// /// The `data` field is a `.node_and_extra`: @@ -4092,14 +3982,6 @@ pub const Node = struct { callconv_expr: OptionalIndex, }; - /// To be removed after 0.15.0 is tagged - pub const AsmLegacy = struct { - items_start: ExtraIndex, - items_end: ExtraIndex, - /// Needed to make lastToken() work. 
- rparen: TokenIndex, - }; - pub const Asm = struct { items_start: ExtraIndex, items_end: ExtraIndex, diff --git a/lib/std/zig/Ast/Render.zig b/lib/std/zig/Ast/Render.zig index d3bdc87ce9..fb58d7e32f 100644 --- a/lib/std/zig/Ast/Render.zig +++ b/lib/std/zig/Ast/Render.zig @@ -896,9 +896,6 @@ fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) Error!void { .@"asm", => return renderAsm(r, tree.fullAsm(node).?, space), - // To be removed after 0.15.0 is tagged - .asm_legacy => return renderAsmLegacy(r, tree.legacyAsm(node).?, space), - .enum_literal => { try renderToken(r, tree.nodeMainToken(node) - 1, .none); // . return renderIdentifier(r, tree.nodeMainToken(node), space, .eagerly_unquote); // name @@ -2413,185 +2410,6 @@ fn renderContainerDecl( return renderToken(r, rbrace, space); // rbrace } -fn renderAsmLegacy( - r: *Render, - asm_node: Ast.full.AsmLegacy, - space: Space, -) Error!void { - const tree = r.tree; - const ais = r.ais; - - try renderToken(r, asm_node.ast.asm_token, .space); // asm - - if (asm_node.volatile_token) |volatile_token| { - try renderToken(r, volatile_token, .space); // volatile - try renderToken(r, volatile_token + 1, .none); // lparen - } else { - try renderToken(r, asm_node.ast.asm_token + 1, .none); // lparen - } - - if (asm_node.ast.items.len == 0) { - try ais.forcePushIndent(.normal); - if (asm_node.first_clobber) |first_clobber| { - // asm ("foo" ::: "a", "b") - // asm ("foo" ::: "a", "b",) - try renderExpression(r, asm_node.ast.template, .space); - // Render the three colons. - try renderToken(r, first_clobber - 3, .none); - try renderToken(r, first_clobber - 2, .none); - try renderToken(r, first_clobber - 1, .space); - - try ais.writeAll(".{ "); - - var tok_i = first_clobber; - while (true) : (tok_i += 1) { - try ais.writeByte('.'); - _ = try writeStringLiteralAsIdentifier(r, tok_i); - try ais.writeAll(" = true"); - - tok_i += 1; - switch (tree.tokenTag(tok_i)) { - .r_paren => { - try ais.writeAll(" }"); - ais.popIndent(); - return renderToken(r, tok_i, space); - }, - .comma => { - if (tree.tokenTag(tok_i + 1) == .r_paren) { - try ais.writeAll(" }"); - ais.popIndent(); - return renderToken(r, tok_i + 1, space); - } else { - try renderToken(r, tok_i, .space); - } - }, - else => unreachable, - } - } - } else { - unreachable; - } - } - - try ais.forcePushIndent(.normal); - try renderExpression(r, asm_node.ast.template, .newline); - ais.setIndentDelta(asm_indent_delta); - const colon1 = tree.lastToken(asm_node.ast.template) + 1; - - const colon2 = if (asm_node.outputs.len == 0) colon2: { - try renderToken(r, colon1, .newline); // : - break :colon2 colon1 + 1; - } else colon2: { - try renderToken(r, colon1, .space); // : - - try ais.forcePushIndent(.normal); - for (asm_node.outputs, 0..) 
|asm_output, i| { - if (i + 1 < asm_node.outputs.len) { - const next_asm_output = asm_node.outputs[i + 1]; - try renderAsmOutput(r, asm_output, .none); - - const comma = tree.firstToken(next_asm_output) - 1; - try renderToken(r, comma, .newline); // , - try renderExtraNewlineToken(r, tree.firstToken(next_asm_output)); - } else if (asm_node.inputs.len == 0 and asm_node.first_clobber == null) { - try ais.pushSpace(.comma); - try renderAsmOutput(r, asm_output, .comma); - ais.popSpace(); - ais.popIndent(); - ais.setIndentDelta(indent_delta); - ais.popIndent(); - return renderToken(r, asm_node.ast.rparen, space); // rparen - } else { - try ais.pushSpace(.comma); - try renderAsmOutput(r, asm_output, .comma); - ais.popSpace(); - const comma_or_colon = tree.lastToken(asm_output) + 1; - ais.popIndent(); - break :colon2 switch (tree.tokenTag(comma_or_colon)) { - .comma => comma_or_colon + 1, - else => comma_or_colon, - }; - } - } else unreachable; - }; - - const colon3 = if (asm_node.inputs.len == 0) colon3: { - try renderToken(r, colon2, .newline); // : - break :colon3 colon2 + 1; - } else colon3: { - try renderToken(r, colon2, .space); // : - try ais.forcePushIndent(.normal); - for (asm_node.inputs, 0..) |asm_input, i| { - if (i + 1 < asm_node.inputs.len) { - const next_asm_input = asm_node.inputs[i + 1]; - try renderAsmInput(r, asm_input, .none); - - const first_token = tree.firstToken(next_asm_input); - try renderToken(r, first_token - 1, .newline); // , - try renderExtraNewlineToken(r, first_token); - } else if (asm_node.first_clobber == null) { - try ais.pushSpace(.comma); - try renderAsmInput(r, asm_input, .comma); - ais.popSpace(); - ais.popIndent(); - ais.setIndentDelta(indent_delta); - ais.popIndent(); - return renderToken(r, asm_node.ast.rparen, space); // rparen - } else { - try ais.pushSpace(.comma); - try renderAsmInput(r, asm_input, .comma); - ais.popSpace(); - const comma_or_colon = tree.lastToken(asm_input) + 1; - ais.popIndent(); - break :colon3 switch (tree.tokenTag(comma_or_colon)) { - .comma => comma_or_colon + 1, - else => comma_or_colon, - }; - } - } - unreachable; - }; - - try renderToken(r, colon3, .space); // : - try ais.writeAll(".{ "); - const first_clobber = asm_node.first_clobber.?; - var tok_i = first_clobber; - while (true) { - switch (tree.tokenTag(tok_i + 1)) { - .r_paren => { - ais.setIndentDelta(indent_delta); - try ais.writeByte('.'); - const lexeme_len = try writeStringLiteralAsIdentifier(r, tok_i); - try ais.writeAll(" = true }"); - try renderSpace(r, tok_i, lexeme_len, .newline); - ais.popIndent(); - return renderToken(r, tok_i + 1, space); - }, - .comma => { - switch (tree.tokenTag(tok_i + 2)) { - .r_paren => { - ais.setIndentDelta(indent_delta); - try ais.writeByte('.'); - const lexeme_len = try writeStringLiteralAsIdentifier(r, tok_i); - try ais.writeAll(" = true }"); - try renderSpace(r, tok_i, lexeme_len, .newline); - ais.popIndent(); - return renderToken(r, tok_i + 2, space); - }, - else => { - try ais.writeByte('.'); - _ = try writeStringLiteralAsIdentifier(r, tok_i); - try ais.writeAll(" = true"); - try renderToken(r, tok_i + 1, .space); - tok_i += 2; - }, - } - }, - else => unreachable, - } - } -} - fn renderAsm( r: *Render, asm_node: Ast.full.Asm, diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 9d2429ae22..2a6423a60d 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -507,7 +507,6 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins .bool_or, .@"asm", .asm_simple, - .asm_legacy, 
.string_literal, .number_literal, .call, @@ -814,12 +813,6 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE .@"asm", => return asmExpr(gz, scope, ri, node, tree.fullAsm(node).?), - .asm_legacy => { - return astgen.failNodeNotes(node, "legacy asm clobbers syntax", .{}, &[_]u32{ - try astgen.errNoteNode(node, "use 'zig fmt' to auto-upgrade", .{}), - }); - }, - .string_literal => return stringLiteral(gz, ri, node), .multiline_string_literal => return multilineStringLiteral(gz, ri, node), @@ -10502,7 +10495,6 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev .@"asm", .asm_simple, - .asm_legacy, .identifier, .field_access, .deref, @@ -10746,7 +10738,6 @@ fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.In .tagged_union_enum_tag_trailing, .@"asm", .asm_simple, - .asm_legacy, .add, .add_wrap, .add_sat, @@ -10985,7 +10976,6 @@ fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool { .tagged_union_enum_tag_trailing, .@"asm", .asm_simple, - .asm_legacy, .add, .add_wrap, .add_sat, @@ -13010,9 +13000,9 @@ const GenZir = struct { } const small: Zir.Inst.Asm.Small = .{ + .is_volatile = args.is_volatile, .outputs_len = @intCast(args.outputs.len), .inputs_len = @intCast(args.inputs.len), - .is_volatile = args.is_volatile, }; const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len); diff --git a/lib/std/zig/AstRlAnnotate.zig b/lib/std/zig/AstRlAnnotate.zig index c7e9abf2bb..e1a83fb75a 100644 --- a/lib/std/zig/AstRlAnnotate.zig +++ b/lib/std/zig/AstRlAnnotate.zig @@ -310,7 +310,6 @@ fn expr(astrl: *AstRlAnnotate, node: Ast.Node.Index, block: ?*Block, ri: ResultI .unreachable_literal, .asm_simple, .@"asm", - .asm_legacy, .enum_literal, .error_value, .anyframe_literal, diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index b230a834a4..7fe55d8426 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -2857,32 +2857,6 @@ fn expectAsmExpr(p: *Parse) !Node.Index { _ = p.eatToken(.colon) orelse break :clobbers .none; - // For automatic upgrades; delete after 0.15.0 released. - if (p.tokenTag(p.tok_i) == .string_literal) { - while (p.eatToken(.string_literal)) |_| { - switch (p.tokenTag(p.tok_i)) { - .comma => p.tok_i += 1, - .colon, .r_paren, .r_brace, .r_bracket => break, - // Likely just a missing comma; give error but continue parsing. - else => try p.warnExpected(.comma), - } - } - const rparen = try p.expectToken(.r_paren); - const span = try p.listToSpan(p.scratch.items[scratch_top..]); - return p.addNode(.{ - .tag = .asm_legacy, - .main_token = asm_token, - .data = .{ .node_and_extra = .{ - template, - try p.addExtra(Node.AsmLegacy{ - .items_start = span.start, - .items_end = span.end, - .rparen = rparen, - }), - } }, - }); - } - break :clobbers (try p.expectExpr()).toOptional(); } else .none; diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index c5bb1b668e..89d608633c 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -250,13 +250,15 @@ const RegistryWtf16Le = struct { /// After finishing work, call `closeKey`. 
fn openKey(hkey: windows.HKEY, key_wtf16le: [:0]const u16, options: OpenOptions) error{KeyNotFound}!RegistryWtf16Le { var key: windows.HKEY = undefined; - var access: windows.REGSAM = windows.KEY_QUERY_VALUE | windows.KEY_ENUMERATE_SUB_KEYS; - if (options.wow64_32) access |= windows.KEY_WOW64_32KEY; const return_code_int: windows.HRESULT = windows.advapi32.RegOpenKeyExW( hkey, key_wtf16le, 0, - access, + .{ .SPECIFIC = .{ .KEY = .{ + .QUERY_VALUE = true, + .ENUMERATE_SUB_KEYS = true, + .WOW64_32KEY = options.wow64_32, + } } }, &key, ); const return_code: windows.Win32Error = @enumFromInt(return_code_int); @@ -389,7 +391,10 @@ const RegistryWtf16Le = struct { const return_code_int: windows.HRESULT = std.os.windows.advapi32.RegLoadAppKeyW( absolute_path_as_wtf16le, &key, - windows.KEY_QUERY_VALUE | windows.KEY_ENUMERATE_SUB_KEYS, + .{ .SPECIFIC = .{ .KEY = .{ + .QUERY_VALUE = true, + .ENUMERATE_SUB_KEYS = true, + } } }, 0, 0, ); diff --git a/lib/std/zig/ZonGen.zig b/lib/std/zig/ZonGen.zig index ecadd737f2..4d4cfcad9f 100644 --- a/lib/std/zig/ZonGen.zig +++ b/lib/std/zig/ZonGen.zig @@ -238,7 +238,7 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator => try zg.addErrorNode(node, "control flow is not allowed in ZON", .{}), .@"comptime" => try zg.addErrorNode(node, "keyword 'comptime' is not allowed in ZON", .{}), - .asm_simple, .@"asm", .asm_legacy => try zg.addErrorNode(node, "inline asm is not allowed in ZON", .{}), + .asm_simple, .@"asm" => try zg.addErrorNode(node, "inline asm is not allowed in ZON", .{}), .builtin_call_two, .builtin_call_two_comma, diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 84320e5dbc..3ce2603947 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -31,54 +31,16 @@ test "zig fmt: tuple struct" { } test "zig fmt: preserves clobbers in inline asm with stray comma" { - try testTransform( - \\fn foo() void { - \\ asm volatile ("" - \\ : [_] "" (-> type), - \\ : - \\ : "clobber" - \\ ); - \\ asm volatile ("" - \\ : - \\ : [_] "" (type), - \\ : "clobber" - \\ ); - \\} - \\ - , + try testCanonical( \\fn foo() void { \\ asm volatile ("" \\ : [_] "" (-> type), \\ : - \\ : .{ .clobber = true } - \\ ); + \\ : .{ .clobber = true }); \\ asm volatile ("" \\ : \\ : [_] "" (type), - \\ : .{ .clobber = true } - \\ ); - \\} - \\ - ); -} - -test "zig fmt: remove trailing comma at the end of assembly clobber" { - try testTransform( - \\fn foo() void { - \\ asm volatile ("" - \\ : [_] "" (-> type), - \\ : - \\ : "clobber1", "clobber2", - \\ ); - \\} - \\ - , - \\fn foo() void { - \\ asm volatile ("" - \\ : [_] "" (-> type), - \\ : - \\ : .{ .clobber1 = true, .clobber2 = true } - \\ ); + \\ : .{ .clobber = true }); \\} \\ ); @@ -641,27 +603,7 @@ test "zig fmt: builtin call with trailing comma" { } test "zig fmt: asm expression with comptime content" { - try testTransform( - \\comptime { - \\ asm ("foo" ++ "bar"); - \\} - \\pub fn main() void { - \\ asm volatile ("foo" ++ "bar"); - \\ asm volatile ("foo" ++ "bar" - \\ : [_] "" (x), - \\ ); - \\ asm volatile ("foo" ++ "bar" - \\ : [_] "" (x), - \\ : [_] "" (y), - \\ ); - \\ asm volatile ("foo" ++ "bar" - \\ : [_] "" (x), - \\ : [_] "" (y), - \\ : "h", "e", "l", "l", "o" - \\ ); - \\} - \\ - , + try testCanonical( \\comptime { \\ asm ("foo" ++ "bar"); \\} @@ -677,8 +619,7 @@ test "zig fmt: asm expression with comptime content" { \\ asm volatile ("foo" ++ "bar" \\ : [_] "" (x), \\ : [_] "" (y), - \\ : .{ .h = true, .e = true, .l = true, .l = true, .o = true } 
- \\ ); + \\ : .{ .h = true, .e = true, .l = true, .l = true, .o = true }); \\} \\ ); @@ -2198,7 +2139,7 @@ test "zig fmt: simple asm" { \\ asm ("not real assembly" \\ :[a] "x" (->i32),:[a] "x" (1),); \\ asm ("still not real assembly" - \\ :::"a","b",); + \\ :::.{.a=true,.b=true}); \\} , \\comptime { @@ -3940,24 +3881,13 @@ test "zig fmt: fn type" { } test "zig fmt: inline asm" { - try testTransform( - \\pub fn syscall1(number: usize, arg1: usize) usize { - \\ return asm volatile ("syscall" - \\ : [ret] "={rax}" (-> usize), - \\ : [number] "{rax}" (number), - \\ [arg1] "{rdi}" (arg1), - \\ : "rcx", "r11" - \\ ); - \\} - \\ - , + try testCanonical( \\pub fn syscall1(number: usize, arg1: usize) usize { \\ return asm volatile ("syscall" \\ : [ret] "={rax}" (-> usize), \\ : [number] "{rax}" (number), \\ [arg1] "{rdi}" (arg1), - \\ : .{ .rcx = true, .r11 = true } - \\ ); + \\ : .{ .rcx = true, .r11 = true }); \\} \\ ); @@ -5789,8 +5719,7 @@ test "zig fmt: canonicalize symbols (asm)" { \\ [@"arg1"] "{rdi}" (arg), \\ [arg2] "{rsi}" (arg), \\ [arg3] "{rdx}" (arg), - \\ : "rcx", "fn" - \\ ); + \\ : .{ .rcx = true, .@"fn" = true }); \\ \\ const @"false": usize = 10; \\ const @"true" = "explode"; @@ -5811,8 +5740,7 @@ test "zig fmt: canonicalize symbols (asm)" { \\ [arg1] "{rdi}" (arg), \\ [arg2] "{rsi}" (arg), \\ [arg3] "{rdx}" (arg), - \\ : .{ .rcx = true, .@"fn" = true } - \\ ); + \\ : .{ .rcx = true, .@"fn" = true }); \\ \\ const @"false": usize = 10; \\ const @"true" = "explode"; diff --git a/lib/std/zig/target.zig b/lib/std/zig/target.zig index d1d39948c4..ddd100c404 100644 --- a/lib/std/zig/target.zig +++ b/lib/std/zig/target.zig @@ -71,7 +71,6 @@ pub const available_libcs = [_]ArchOsAbi{ .{ .arch = .mips64el, .os = .linux, .abi = .gnuabin32, .os_ver = .{ .major = 2, .minor = 6, .patch = 0 }, .glibc_triple = "mips64el-linux-gnu-n32" }, .{ .arch = .mips64el, .os = .linux, .abi = .muslabi64, .os_ver = .{ .major = 2, .minor = 3, .patch = 48 } }, .{ .arch = .mips64el, .os = .linux, .abi = .muslabin32, .os_ver = .{ .major = 2, .minor = 6, .patch = 0 } }, - .{ .arch = .powerpc, .os = .freebsd, .abi = .eabihf, .os_ver = .{ .major = 7, .minor = 1, .patch = 0 } }, .{ .arch = .powerpc, .os = .linux, .abi = .gnueabi, .os_ver = .{ .major = 1, .minor = 3, .patch = 45 }, .glibc_triple = "powerpc-linux-gnu-soft" }, .{ .arch = .powerpc, .os = .linux, .abi = .gnueabihf, .os_ver = .{ .major = 1, .minor = 3, .patch = 45 }, .glibc_triple = "powerpc-linux-gnu" }, .{ .arch = .powerpc, .os = .linux, .abi = .musleabi, .os_ver = .{ .major = 1, .minor = 3, .patch = 45 } }, @@ -112,12 +111,6 @@ pub const available_libcs = [_]ArchOsAbi{ .{ .arch = .x86_64, .os = .windows, .abi = .gnu }, }; -/// The version of Zig's bundled FreeBSD libc used when linking libc statically. -pub const freebsd_libc_version: std.SemanticVersion = .{ .major = 14, .minor = 0, .patch = 0 }; - -/// The version of Zig's bundled NetBSD libc used when linking libc statically. -pub const netbsd_libc_version: std.SemanticVersion = .{ .major = 10, .minor = 1, .patch = 0 }; - pub fn canBuildLibC(target: *const std.Target) bool { for (available_libcs) |libc| { if (target.cpu.arch == libc.arch and target.os.tag == libc.os and target.abi == libc.abi) { |

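Context for the asm-related hunks above: the `asm_legacy` AST node, its renderer, and the parser's auto-upgrade path for string-list clobbers are all deleted, leaving only the struct-literal clobber form exercised by the remaining `zig fmt` tests. A minimal sketch of that surviving form, mirroring the `syscall1` canonical test case in this diff (illustrative only, not itself part of the patch):

// Clobbers are written as a struct literal of register flags
// (`.{ .rcx = true, .r11 = true }`) instead of the legacy string list
// (`: "rcx", "r11"`) that the removed `asm_legacy` node represented.
pub fn syscall1(number: usize, arg1: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize),
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
        : .{ .rcx = true, .r11 = true });
}

Because each clobbered register is a field name, identifiers that collide with keywords are quoted, e.g. `.@"fn" = true`, as the canonicalize-symbols test above shows.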