| field | value | date |
|---|---|---|
| author | Rue <78876133+IOKG04@users.noreply.github.com> | 2025-07-28 14:54:52 +0200 |
| committer | GitHub <noreply@github.com> | 2025-07-28 14:54:52 +0200 |
| commit | 5381e7891dcdd7b6a9e74250cdcce221fe464cdc | |
| tree | 4c74744ed84120dccae6dc9811ce945911108a17 /lib/std/Io | |
| parent | 84ae54fbe64a15301317716e7f901d81585332d5 | |
| parent | dea3ed7f59347e87a1b8fa237202873988084ae8 | |
Merge branch 'ziglang:master' into some-documentation-updates-0
Diffstat (limited to 'lib/std/Io')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | lib/std/Io/DeprecatedReader.zig | 8 |
| -rw-r--r-- | lib/std/Io/DeprecatedWriter.zig | 7 |
| -rw-r--r-- | lib/std/Io/Reader.zig | 140 |
| -rw-r--r-- | lib/std/Io/Writer.zig | 113 |
| -rw-r--r-- | lib/std/Io/buffered_atomic_file.zig | 55 |
| -rw-r--r-- | lib/std/Io/c_writer.zig | 44 |
| -rw-r--r-- | lib/std/Io/change_detection_stream.zig | 55 |
| -rw-r--r-- | lib/std/Io/find_byte_writer.zig | 40 |
| -rw-r--r-- | lib/std/Io/multi_writer.zig | 53 |
| -rw-r--r-- | lib/std/Io/stream_source.zig | 127 |
10 files changed, 166 insertions, 476 deletions
diff --git a/lib/std/Io/DeprecatedReader.zig b/lib/std/Io/DeprecatedReader.zig index f6cb9f61d5..af1eda8415 100644 --- a/lib/std/Io/DeprecatedReader.zig +++ b/lib/std/Io/DeprecatedReader.zig @@ -373,11 +373,11 @@ pub fn discard(self: Self) anyerror!u64 { } /// Helper for bridging to the new `Reader` API while upgrading. -pub fn adaptToNewApi(self: *const Self) Adapter { +pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter { return .{ .derp_reader = self.*, .new_interface = .{ - .buffer = &.{}, + .buffer = buffer, .vtable = &.{ .stream = Adapter.stream }, .seek = 0, .end = 0, @@ -393,10 +393,12 @@ pub const Adapter = struct { fn stream(r: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize { const a: *@This() = @alignCast(@fieldParentPtr("new_interface", r)); const buf = limit.slice(try w.writableSliceGreedy(1)); - return a.derp_reader.read(buf) catch |err| { + const n = a.derp_reader.read(buf) catch |err| { a.err = err; return error.ReadFailed; }; + w.advance(n); + return n; } }; diff --git a/lib/std/Io/DeprecatedWriter.zig b/lib/std/Io/DeprecatedWriter.zig index 391b985357..81774b357c 100644 --- a/lib/std/Io/DeprecatedWriter.zig +++ b/lib/std/Io/DeprecatedWriter.zig @@ -100,7 +100,12 @@ pub const Adapter = struct { fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize { _ = splat; - const a: *@This() = @fieldParentPtr("new_interface", w); + const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w)); + const buffered = w.buffered(); + if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| { + a.err = err; + return error.WriteFailed; + }); return a.derp_writer.write(data[0]) catch |err| { a.err = err; return error.WriteFailed; diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig index f25e113522..da9e01dd2c 100644 --- a/lib/std/Io/Reader.zig +++ b/lib/std/Io/Reader.zig @@ -67,6 +67,18 @@ pub const VTable = struct { /// /// This function is only called when `buffer` is empty. discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard, + + /// Ensures `capacity` more data can be buffered without rebasing. + /// + /// Asserts `capacity` is within buffer capacity, or that the stream ends + /// within `capacity` bytes. + /// + /// Only called when `capacity` cannot fit into the unused capacity of + /// `buffer`. + /// + /// The default implementation moves buffered data to the start of + /// `buffer`, setting `seek` to zero, and cannot fail. + rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase, }; pub const StreamError = error{ @@ -97,6 +109,10 @@ pub const ShortError = error{ ReadFailed, }; +pub const RebaseError = error{ + EndOfStream, +}; + pub const failing: Reader = .{ .vtable = &.{ .stream = failingStream, @@ -122,6 +138,7 @@ pub fn fixed(buffer: []const u8) Reader { .vtable = &.{ .stream = endingStream, .discard = endingDiscard, + .rebase = endingRebase, }, // This cast is safe because all potential writes to it will instead // return `error.EndOfStream`. @@ -179,6 +196,38 @@ pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void { while (remaining != 0) remaining -= try r.stream(w, .limited(remaining)); } +/// "Pump" exactly `n` bytes from the reader to the writer. +pub fn streamExact64(r: *Reader, w: *Writer, n: u64) StreamError!void { + var remaining = n; + while (remaining != 0) remaining -= try r.stream(w, .limited64(remaining)); +} + +/// "Pump" exactly `n` bytes from the reader to the writer. 
+/// +/// When draining `w`, ensures that at least `preserve_len` bytes remain +/// buffered. +/// +/// Asserts `Writer.buffer` capacity exceeds `preserve_len`. +pub fn streamExactPreserve(r: *Reader, w: *Writer, preserve_len: usize, n: usize) StreamError!void { + if (w.end + n <= w.buffer.len) { + @branchHint(.likely); + return streamExact(r, w, n); + } + // If `n` is large, we can ignore `preserve_len` up to a point. + var remaining = n; + while (remaining > preserve_len) { + assert(remaining != 0); + remaining -= try r.stream(w, .limited(remaining - preserve_len)); + if (w.end + remaining <= w.buffer.len) return streamExact(r, w, remaining); + } + // All the next bytes received must be preserved. + if (preserve_len < w.end) { + @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]); + w.end = preserve_len; + } + return streamExact(r, w, remaining); +} + /// "Pump" data from the reader to the writer, handling `error.EndOfStream` as /// a success case. /// @@ -234,7 +283,7 @@ pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocErro /// such case, the next byte that would be read will be the first one to exceed /// `limit`, and all preceeding bytes have been appended to `list`. /// -/// Asserts `buffer` has nonzero capacity. +/// If `limit` is not `Limit.unlimited`, asserts `buffer` has nonzero capacity. /// /// See also: /// * `allocRemaining` @@ -245,7 +294,7 @@ pub fn appendRemaining( list: *std.ArrayListAlignedUnmanaged(u8, alignment), limit: Limit, ) LimitedAllocError!void { - assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data. + if (limit != .unlimited) assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data. const buffer_contents = r.buffer[r.seek..r.end]; const copy_len = limit.minInt(buffer_contents.len); try list.appendSlice(gpa, r.buffer[0..copy_len]); @@ -748,11 +797,8 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 { @branchHint(.likely); return buffer[seek .. end + 1]; } - if (r.vtable.stream == &endingStream) { - // Protect the `@constCast` of `fixed`. - return error.EndOfStream; - } - r.rebase(); + // TODO take a parameter for max search length rather than relying on buffer capacity + try rebase(r, r.buffer.len); while (r.buffer.len - r.end != 0) { const end_cap = r.buffer[r.end..]; var writer: Writer = .fixed(end_cap); @@ -1018,11 +1064,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void { }; if (r.seek + n <= r.end) return; }; - if (r.vtable.stream == &endingStream) { - // Protect the `@constCast` of `fixed`. - return error.EndOfStream; - } - rebaseCapacity(r, n); + try rebase(r, n); var writer: Writer = .{ .buffer = r.buffer, .vtable = &.{ .drain = Writer.fixedDrain }, @@ -1042,7 +1084,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void { /// /// Asserts buffer capacity is at least 1. 
pub fn fillMore(r: *Reader) Error!void { - rebaseCapacity(r, 1); + try rebase(r, 1); var writer: Writer = .{ .buffer = r.buffer, .end = r.end, @@ -1219,7 +1261,7 @@ pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result { pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void { if (n <= r.buffer.len) return; - if (r.seek > 0) rebase(r); + if (r.seek > 0) rebase(r, r.buffer.len); var list: ArrayList(u8) = .{ .items = r.buffer[0..r.end], .capacity = r.buffer.len, @@ -1235,37 +1277,6 @@ pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void return fill(r, n); } -/// Returns a slice into the unused capacity of `buffer` with at least -/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary. -/// -/// After calling this function, typically the caller will follow up with a -/// call to `advanceBufferEnd` to report the actual number of bytes buffered. -pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 { - { - const unused = r.buffer[r.end..]; - if (unused.len >= min_len) return unused; - } - if (r.seek > 0) rebase(r); - { - var list: ArrayList(u8) = .{ - .items = r.buffer[0..r.end], - .capacity = r.buffer.len, - }; - defer r.buffer = list.allocatedSlice(); - try list.ensureUnusedCapacity(allocator, min_len); - } - const unused = r.buffer[r.end..]; - assert(unused.len >= min_len); - return unused; -} - -/// After writing directly into the unused capacity of `buffer`, this function -/// updates `end` so that users of `Reader` can receive the data. -pub fn advanceBufferEnd(r: *Reader, n: usize) void { - assert(n <= r.buffer.len - r.end); - r.end += n; -} - fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result { const result_info = @typeInfo(Result).int; comptime assert(result_info.bits % 7 == 0); @@ -1296,37 +1307,20 @@ fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Resu } } -/// Left-aligns data such that `r.seek` becomes zero. -/// -/// If `r.seek` is not already zero then `buffer` is mutated, making it illegal -/// to call this function with a const-casted `buffer`, such as in the case of -/// `fixed`. This issue can be avoided: -/// * in implementations, by attempting a read before a rebase, in which -/// case the read will return `error.EndOfStream`, preventing the rebase. -/// * in usage, by copying into a mutable buffer before initializing `fixed`. -pub fn rebase(r: *Reader) void { - if (r.seek == 0) return; +/// Ensures `capacity` more data can be buffered without rebasing. +pub fn rebase(r: *Reader, capacity: usize) RebaseError!void { + if (r.end + capacity <= r.buffer.len) return; + return r.vtable.rebase(r, capacity); +} + +pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void { + if (r.end <= r.buffer.len - capacity) return; const data = r.buffer[r.seek..r.end]; @memmove(r.buffer[0..data.len], data); r.seek = 0; r.end = data.len; } -/// Ensures `capacity` more data can be buffered without rebasing, by rebasing -/// if necessary. -/// -/// Asserts `capacity` is within the buffer capacity. -/// -/// If the rebase occurs then `buffer` is mutated, making it illegal to call -/// this function with a const-casted `buffer`, such as in the case of `fixed`. -/// This issue can be avoided: -/// * in implementations, by attempting a read before a rebase, in which -/// case the read will return `error.EndOfStream`, preventing the rebase. 
-/// * in usage, by copying into a mutable buffer before initializing `fixed`. -pub fn rebaseCapacity(r: *Reader, capacity: usize) void { - if (r.end > r.buffer.len - capacity) rebase(r); -} - /// Advances the stream and decreases the size of the storage buffer by `n`, /// returning the range of bytes no longer accessible by `r`. /// @@ -1682,6 +1676,12 @@ fn endingDiscard(r: *Reader, limit: Limit) Error!usize { return error.EndOfStream; } +fn endingRebase(r: *Reader, capacity: usize) RebaseError!void { + _ = r; + _ = capacity; + return error.EndOfStream; +} + fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize { _ = r; _ = w; diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 0723073592..06a6534071 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -256,10 +256,10 @@ test "fixed buffer flush" { try testing.expectEqual(10, buffer[0]); } -/// Calls `VTable.drain` but hides the last `preserve_length` bytes from the +/// Calls `VTable.drain` but hides the last `preserve_len` bytes from the /// implementation, keeping them buffered. -pub fn drainPreserve(w: *Writer, preserve_length: usize) Error!void { - const temp_end = w.end -| preserve_length; +pub fn drainPreserve(w: *Writer, preserve_len: usize) Error!void { + const temp_end = w.end -| preserve_len; const preserved = w.buffer[temp_end..w.end]; w.end = temp_end; defer w.end += preserved.len; @@ -310,24 +310,38 @@ pub fn writableSliceGreedy(w: *Writer, minimum_length: usize) Error![]u8 { } /// Asserts the provided buffer has total capacity enough for `minimum_length` -/// and `preserve_length` combined. +/// and `preserve_len` combined. /// /// Does not `advance` the buffer end position. /// -/// When draining the buffer, ensures that at least `preserve_length` bytes +/// When draining the buffer, ensures that at least `preserve_len` bytes /// remain buffered. /// -/// If `preserve_length` is zero, this is equivalent to `writableSliceGreedy`. -pub fn writableSliceGreedyPreserve(w: *Writer, preserve_length: usize, minimum_length: usize) Error![]u8 { - assert(w.buffer.len >= preserve_length + minimum_length); +/// If `preserve_len` is zero, this is equivalent to `writableSliceGreedy`. +pub fn writableSliceGreedyPreserve(w: *Writer, preserve_len: usize, minimum_length: usize) Error![]u8 { + assert(w.buffer.len >= preserve_len + minimum_length); while (w.buffer.len - w.end < minimum_length) { - try drainPreserve(w, preserve_length); + try drainPreserve(w, preserve_len); } else { @branchHint(.likely); return w.buffer[w.end..]; } } +/// Asserts the provided buffer has total capacity enough for `len`. +/// +/// Advances the buffer end position by `len`. +/// +/// When draining the buffer, ensures that at least `preserve_len` bytes +/// remain buffered. +/// +/// If `preserve_len` is zero, this is equivalent to `writableSlice`. +pub fn writableSlicePreserve(w: *Writer, preserve_len: usize, len: usize) Error![]u8 { + const big_slice = try w.writableSliceGreedyPreserve(preserve_len, len); + advance(w, len); + return big_slice[0..len]; +} + pub const WritableVectorIterator = struct { first: []u8, middle: []const []u8 = &.{}, @@ -523,16 +537,16 @@ pub fn write(w: *Writer, bytes: []const u8) Error!usize { return w.vtable.drain(w, &.{bytes}, 1); } -/// Asserts `buffer` capacity exceeds `preserve_length`. -pub fn writePreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!usize { - assert(preserve_length <= w.buffer.len); +/// Asserts `buffer` capacity exceeds `preserve_len`. 
+pub fn writePreserve(w: *Writer, preserve_len: usize, bytes: []const u8) Error!usize { + assert(preserve_len <= w.buffer.len); if (w.end + bytes.len <= w.buffer.len) { @branchHint(.likely); @memcpy(w.buffer[w.end..][0..bytes.len], bytes); w.end += bytes.len; return bytes.len; } - const temp_end = w.end -| preserve_length; + const temp_end = w.end -| preserve_len; const preserved = w.buffer[temp_end..w.end]; w.end = temp_end; defer w.end += preserved.len; @@ -552,13 +566,13 @@ pub fn writeAll(w: *Writer, bytes: []const u8) Error!void { /// Calls `drain` as many times as necessary such that all of `bytes` are /// transferred. /// -/// When draining the buffer, ensures that at least `preserve_length` bytes +/// When draining the buffer, ensures that at least `preserve_len` bytes /// remain buffered. /// -/// Asserts `buffer` capacity exceeds `preserve_length`. -pub fn writeAllPreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!void { +/// Asserts `buffer` capacity exceeds `preserve_len`. +pub fn writeAllPreserve(w: *Writer, preserve_len: usize, bytes: []const u8) Error!void { var index: usize = 0; - while (index < bytes.len) index += try w.writePreserve(preserve_length, bytes[index..]); + while (index < bytes.len) index += try w.writePreserve(preserve_len, bytes[index..]); } /// Renders fmt string with args, calling `writer` with slices of bytes. @@ -761,11 +775,11 @@ pub fn writeByte(w: *Writer, byte: u8) Error!void { } } -/// When draining the buffer, ensures that at least `preserve_length` bytes +/// When draining the buffer, ensures that at least `preserve_len` bytes /// remain buffered. -pub fn writeBytePreserve(w: *Writer, preserve_length: usize, byte: u8) Error!void { +pub fn writeBytePreserve(w: *Writer, preserve_len: usize, byte: u8) Error!void { while (w.buffer.len - w.end == 0) { - try drainPreserve(w, preserve_length); + try drainPreserve(w, preserve_len); } else { @branchHint(.likely); w.buffer[w.end] = byte; @@ -788,10 +802,42 @@ test splatByteAll { try testing.expectEqualStrings("7" ** 45, aw.writer.buffered()); } +pub fn splatBytePreserve(w: *Writer, preserve_len: usize, byte: u8, n: usize) Error!void { + const new_end = w.end + n; + if (new_end <= w.buffer.len) { + @memset(w.buffer[w.end..][0..n], byte); + w.end = new_end; + return; + } + // If `n` is large, we can ignore `preserve_len` up to a point. + var remaining = n; + while (remaining > preserve_len) { + assert(remaining != 0); + remaining -= try splatByte(w, byte, remaining - preserve_len); + if (w.end + remaining <= w.buffer.len) { + @memset(w.buffer[w.end..][0..remaining], byte); + w.end += remaining; + return; + } + } + // All the next bytes received must be preserved. + if (preserve_len < w.end) { + @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]); + w.end = preserve_len; + } + while (remaining > 0) remaining -= try w.splatByte(byte, remaining); +} + /// Writes the same byte many times, allowing short writes. /// /// Does maximum of one underlying `VTable.drain`. 
pub fn splatByte(w: *Writer, byte: u8, n: usize) Error!usize { + if (w.end + n <= w.buffer.len) { + @branchHint(.likely); + @memset(w.buffer[w.end..][0..n], byte); + w.end += n; + return n; + } return writeSplat(w, &.{&.{byte}}, n); } @@ -801,9 +847,10 @@ pub fn splatBytesAll(w: *Writer, bytes: []const u8, splat: usize) Error!void { var remaining_bytes: usize = bytes.len * splat; remaining_bytes -= try w.splatBytes(bytes, splat); while (remaining_bytes > 0) { - const leftover = remaining_bytes % bytes.len; - const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover ..], bytes }; - remaining_bytes -= try w.writeSplat(&buffers, splat); + const leftover_splat = remaining_bytes / bytes.len; + const leftover_bytes = remaining_bytes % bytes.len; + const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover_bytes ..], bytes }; + remaining_bytes -= try w.writeSplat(&buffers, leftover_splat); } } @@ -1564,17 +1611,23 @@ pub fn printFloatHexOptions(w: *Writer, value: anytype, options: std.fmt.Number) } pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precision: ?usize) Error!void { - if (std.math.signbit(value)) try w.writeByte('-'); - if (std.math.isNan(value)) return w.writeAll(switch (case) { + const v = switch (@TypeOf(value)) { + // comptime_float internally is a f128; this preserves precision. + comptime_float => @as(f128, value), + else => value, + }; + + if (std.math.signbit(v)) try w.writeByte('-'); + if (std.math.isNan(v)) return w.writeAll(switch (case) { .lower => "nan", .upper => "NAN", }); - if (std.math.isInf(value)) return w.writeAll(switch (case) { + if (std.math.isInf(v)) return w.writeAll(switch (case) { .lower => "inf", .upper => "INF", }); - const T = @TypeOf(value); + const T = @TypeOf(v); const TU = std.meta.Int(.unsigned, @bitSizeOf(T)); const mantissa_bits = std.math.floatMantissaBits(T); @@ -1584,7 +1637,7 @@ pub fn printFloatHex(w: *Writer, value: anytype, case: std.fmt.Case, opt_precisi const exponent_mask = (1 << exponent_bits) - 1; const exponent_bias = (1 << (exponent_bits - 1)) - 1; - const as_bits: TU = @bitCast(value); + const as_bits: TU = @bitCast(v); var mantissa = as_bits & mantissa_mask; var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask)); @@ -2239,6 +2292,10 @@ pub const Discarding = struct { pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize { if (File.Handle == void) return error.Unimplemented; + switch (builtin.zig_backend) { + else => {}, + .stage2_aarch64 => return error.Unimplemented, + } const d: *Discarding = @alignCast(@fieldParentPtr("writer", w)); d.count += w.end; w.end = 0; diff --git a/lib/std/Io/buffered_atomic_file.zig b/lib/std/Io/buffered_atomic_file.zig deleted file mode 100644 index 48510bde52..0000000000 --- a/lib/std/Io/buffered_atomic_file.zig +++ /dev/null @@ -1,55 +0,0 @@ -const std = @import("../std.zig"); -const mem = std.mem; -const fs = std.fs; -const File = std.fs.File; - -pub const BufferedAtomicFile = struct { - atomic_file: fs.AtomicFile, - file_writer: File.Writer, - buffered_writer: BufferedWriter, - allocator: mem.Allocator, - - pub const buffer_size = 4096; - pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer); - pub const Writer = std.io.GenericWriter(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write); - - /// TODO when https://github.com/ziglang/zig/issues/2761 is solved - /// this API will not need an allocator - pub fn create( - allocator: mem.Allocator, - dir: fs.Dir, - dest_path: []const u8, - 
atomic_file_options: fs.Dir.AtomicFileOptions, - ) !*BufferedAtomicFile { - var self = try allocator.create(BufferedAtomicFile); - self.* = BufferedAtomicFile{ - .atomic_file = undefined, - .file_writer = undefined, - .buffered_writer = undefined, - .allocator = allocator, - }; - errdefer allocator.destroy(self); - - self.atomic_file = try dir.atomicFile(dest_path, atomic_file_options); - errdefer self.atomic_file.deinit(); - - self.file_writer = self.atomic_file.file.deprecatedWriter(); - self.buffered_writer = .{ .unbuffered_writer = self.file_writer }; - return self; - } - - /// always call destroy, even after successful finish() - pub fn destroy(self: *BufferedAtomicFile) void { - self.atomic_file.deinit(); - self.allocator.destroy(self); - } - - pub fn finish(self: *BufferedAtomicFile) !void { - try self.buffered_writer.flush(); - try self.atomic_file.finish(); - } - - pub fn writer(self: *BufferedAtomicFile) Writer { - return .{ .context = &self.buffered_writer }; - } -}; diff --git a/lib/std/Io/c_writer.zig b/lib/std/Io/c_writer.zig deleted file mode 100644 index 30d0cabcf5..0000000000 --- a/lib/std/Io/c_writer.zig +++ /dev/null @@ -1,44 +0,0 @@ -const std = @import("../std.zig"); -const builtin = @import("builtin"); -const io = std.io; -const testing = std.testing; - -pub const CWriter = io.GenericWriter(*std.c.FILE, std.fs.File.WriteError, cWriterWrite); - -pub fn cWriter(c_file: *std.c.FILE) CWriter { - return .{ .context = c_file }; -} - -fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize { - const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file); - if (amt_written >= 0) return amt_written; - switch (@as(std.c.E, @enumFromInt(std.c._errno().*))) { - .SUCCESS => unreachable, - .INVAL => unreachable, - .FAULT => unreachable, - .AGAIN => unreachable, // this is a blocking API - .BADF => unreachable, // always a race condition - .DESTADDRREQ => unreachable, // connect was never called - .DQUOT => return error.DiskQuota, - .FBIG => return error.FileTooBig, - .IO => return error.InputOutput, - .NOSPC => return error.NoSpaceLeft, - .PERM => return error.PermissionDenied, - .PIPE => return error.BrokenPipe, - else => |err| return std.posix.unexpectedErrno(err), - } -} - -test cWriter { - if (!builtin.link_libc or builtin.os.tag == .wasi) return error.SkipZigTest; - - const filename = "tmp_io_test_file.txt"; - const out_file = std.c.fopen(filename, "w") orelse return error.UnableToOpenTestFile; - defer { - _ = std.c.fclose(out_file); - std.fs.cwd().deleteFileZ(filename) catch {}; - } - - const writer = cWriter(out_file); - try writer.print("hi: {}\n", .{@as(i32, 123)}); -} diff --git a/lib/std/Io/change_detection_stream.zig b/lib/std/Io/change_detection_stream.zig deleted file mode 100644 index d9da1c4a0e..0000000000 --- a/lib/std/Io/change_detection_stream.zig +++ /dev/null @@ -1,55 +0,0 @@ -const std = @import("../std.zig"); -const io = std.io; -const mem = std.mem; -const assert = std.debug.assert; - -/// Used to detect if the data written to a stream differs from a source buffer -pub fn ChangeDetectionStream(comptime WriterType: type) type { - return struct { - const Self = @This(); - pub const Error = WriterType.Error; - pub const Writer = io.GenericWriter(*Self, Error, write); - - anything_changed: bool, - underlying_writer: WriterType, - source_index: usize, - source: []const u8, - - pub fn writer(self: *Self) Writer { - return .{ .context = self }; - } - - fn write(self: *Self, bytes: []const u8) Error!usize { - if 
(!self.anything_changed) { - const end = self.source_index + bytes.len; - if (end > self.source.len) { - self.anything_changed = true; - } else { - const src_slice = self.source[self.source_index..end]; - self.source_index += bytes.len; - if (!mem.eql(u8, bytes, src_slice)) { - self.anything_changed = true; - } - } - } - - return self.underlying_writer.write(bytes); - } - - pub fn changeDetected(self: *Self) bool { - return self.anything_changed or (self.source_index != self.source.len); - } - }; -} - -pub fn changeDetectionStream( - source: []const u8, - underlying_writer: anytype, -) ChangeDetectionStream(@TypeOf(underlying_writer)) { - return ChangeDetectionStream(@TypeOf(underlying_writer)){ - .anything_changed = false, - .underlying_writer = underlying_writer, - .source_index = 0, - .source = source, - }; -} diff --git a/lib/std/Io/find_byte_writer.zig b/lib/std/Io/find_byte_writer.zig deleted file mode 100644 index fe6836f603..0000000000 --- a/lib/std/Io/find_byte_writer.zig +++ /dev/null @@ -1,40 +0,0 @@ -const std = @import("../std.zig"); -const io = std.io; -const assert = std.debug.assert; - -/// A Writer that returns whether the given character has been written to it. -/// The contents are not written to anything. -pub fn FindByteWriter(comptime UnderlyingWriter: type) type { - return struct { - const Self = @This(); - pub const Error = UnderlyingWriter.Error; - pub const Writer = io.GenericWriter(*Self, Error, write); - - underlying_writer: UnderlyingWriter, - byte_found: bool, - byte: u8, - - pub fn writer(self: *Self) Writer { - return .{ .context = self }; - } - - fn write(self: *Self, bytes: []const u8) Error!usize { - if (!self.byte_found) { - self.byte_found = blk: { - for (bytes) |b| - if (b == self.byte) break :blk true; - break :blk false; - }; - } - return self.underlying_writer.write(bytes); - } - }; -} - -pub fn findByteWriter(byte: u8, underlying_writer: anytype) FindByteWriter(@TypeOf(underlying_writer)) { - return FindByteWriter(@TypeOf(underlying_writer)){ - .underlying_writer = underlying_writer, - .byte = byte, - .byte_found = false, - }; -} diff --git a/lib/std/Io/multi_writer.zig b/lib/std/Io/multi_writer.zig deleted file mode 100644 index 20e9e782de..0000000000 --- a/lib/std/Io/multi_writer.zig +++ /dev/null @@ -1,53 +0,0 @@ -const std = @import("../std.zig"); -const io = std.io; - -/// Takes a tuple of streams, and constructs a new stream that writes to all of them -pub fn MultiWriter(comptime Writers: type) type { - comptime var ErrSet = error{}; - inline for (@typeInfo(Writers).@"struct".fields) |field| { - const StreamType = field.type; - ErrSet = ErrSet || StreamType.Error; - } - - return struct { - const Self = @This(); - - streams: Writers, - - pub const Error = ErrSet; - pub const Writer = io.GenericWriter(*Self, Error, write); - - pub fn writer(self: *Self) Writer { - return .{ .context = self }; - } - - pub fn write(self: *Self, bytes: []const u8) Error!usize { - inline for (self.streams) |stream| - try stream.writeAll(bytes); - return bytes.len; - } - }; -} - -pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) { - return .{ .streams = streams }; -} - -const testing = std.testing; - -test "MultiWriter" { - var tmp = testing.tmpDir(.{}); - defer tmp.cleanup(); - var f = try tmp.dir.createFile("t.txt", .{}); - - var buf1: [255]u8 = undefined; - var fbs1 = io.fixedBufferStream(&buf1); - var buf2: [255]u8 = undefined; - var stream = multiWriter(.{ fbs1.writer(), f.writer() }); - - try stream.writer().print("HI", .{}); - f.close(); - - 
try testing.expectEqualSlices(u8, "HI", fbs1.getWritten()); - try testing.expectEqualSlices(u8, "HI", try tmp.dir.readFile("t.txt", &buf2)); -} diff --git a/lib/std/Io/stream_source.zig b/lib/std/Io/stream_source.zig deleted file mode 100644 index 2a3527e479..0000000000 --- a/lib/std/Io/stream_source.zig +++ /dev/null @@ -1,127 +0,0 @@ -const std = @import("../std.zig"); -const builtin = @import("builtin"); -const io = std.io; - -/// Provides `io.GenericReader`, `io.GenericWriter`, and `io.SeekableStream` for in-memory buffers as -/// well as files. -/// For memory sources, if the supplied byte buffer is const, then `io.GenericWriter` is not available. -/// The error set of the stream functions is the error set of the corresponding file functions. -pub const StreamSource = union(enum) { - // TODO: expose UEFI files to std.os in a way that allows this to be true - const has_file = (builtin.os.tag != .freestanding and builtin.os.tag != .uefi); - - /// The stream access is redirected to this buffer. - buffer: io.FixedBufferStream([]u8), - - /// The stream access is redirected to this buffer. - /// Writing to the source will always yield `error.AccessDenied`. - const_buffer: io.FixedBufferStream([]const u8), - - /// The stream access is redirected to this file. - /// On freestanding, this must never be initialized! - file: if (has_file) std.fs.File else void, - - pub const ReadError = io.FixedBufferStream([]u8).ReadError || (if (has_file) std.fs.File.ReadError else error{}); - pub const WriteError = error{AccessDenied} || io.FixedBufferStream([]u8).WriteError || (if (has_file) std.fs.File.WriteError else error{}); - pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{}); - pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{}); - - pub const Reader = io.GenericReader(*StreamSource, ReadError, read); - pub const Writer = io.GenericWriter(*StreamSource, WriteError, write); - pub const SeekableStream = io.SeekableStream( - *StreamSource, - SeekError, - GetSeekPosError, - seekTo, - seekBy, - getPos, - getEndPos, - ); - - pub fn read(self: *StreamSource, dest: []u8) ReadError!usize { - switch (self.*) { - .buffer => |*x| return x.read(dest), - .const_buffer => |*x| return x.read(dest), - .file => |x| if (!has_file) unreachable else return x.read(dest), - } - } - - pub fn write(self: *StreamSource, bytes: []const u8) WriteError!usize { - switch (self.*) { - .buffer => |*x| return x.write(bytes), - .const_buffer => return error.AccessDenied, - .file => |x| if (!has_file) unreachable else return x.write(bytes), - } - } - - pub fn seekTo(self: *StreamSource, pos: u64) SeekError!void { - switch (self.*) { - .buffer => |*x| return x.seekTo(pos), - .const_buffer => |*x| return x.seekTo(pos), - .file => |x| if (!has_file) unreachable else return x.seekTo(pos), - } - } - - pub fn seekBy(self: *StreamSource, amt: i64) SeekError!void { - switch (self.*) { - .buffer => |*x| return x.seekBy(amt), - .const_buffer => |*x| return x.seekBy(amt), - .file => |x| if (!has_file) unreachable else return x.seekBy(amt), - } - } - - pub fn getEndPos(self: *StreamSource) GetSeekPosError!u64 { - switch (self.*) { - .buffer => |*x| return x.getEndPos(), - .const_buffer => |*x| return x.getEndPos(), - .file => |x| if (!has_file) unreachable else return x.getEndPos(), - } - } - - pub fn getPos(self: *StreamSource) GetSeekPosError!u64 { - switch (self.*) { - .buffer => |*x| return x.getPos(), - 
.const_buffer => |*x| return x.getPos(), - .file => |x| if (!has_file) unreachable else return x.getPos(), - } - } - - pub fn reader(self: *StreamSource) Reader { - return .{ .context = self }; - } - - pub fn writer(self: *StreamSource) Writer { - return .{ .context = self }; - } - - pub fn seekableStream(self: *StreamSource) SeekableStream { - return .{ .context = self }; - } -}; - -test "refs" { - std.testing.refAllDecls(StreamSource); -} - -test "mutable buffer" { - var buffer: [64]u8 = undefined; - var source = StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) }; - - var writer = source.writer(); - - try writer.writeAll("Hello, World!"); - - try std.testing.expectEqualStrings("Hello, World!", source.buffer.getWritten()); -} - -test "const buffer" { - const buffer: [64]u8 = "Hello, World!".* ++ ([1]u8{0xAA} ** 51); - var source = StreamSource{ .const_buffer = std.io.fixedBufferStream(&buffer) }; - - var reader = source.reader(); - - var dst_buffer: [13]u8 = undefined; - try reader.readNoEof(&dst_buffer); - - try std.testing.expectEqualStrings("Hello, World!", &dst_buffer); -} |
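
A few usage sketches for the APIs touched above. The headline change to `DeprecatedReader.adaptToNewApi` is that the caller now supplies the adapter's buffer instead of always getting an empty one. A minimal bridging sketch, assuming `std.Io.DeprecatedReader`/`std.Io.Reader` are the public names for these files and that `streamRemaining` is the "pump until `error.EndOfStream`" helper whose doc comment appears in the Reader.zig hunk:

```zig
const std = @import("std");

// Hypothetical helper for illustration: feeds a deprecated reader into code
// that only speaks the new `std.Io.Reader` interface.
fn pumpThroughNewApi(old: std.Io.DeprecatedReader, out: *std.Io.Writer) !void {
    var adapter_buffer: [64]u8 = undefined; // size chosen arbitrarily
    // New signature: the caller passes the buffer explicitly.
    var adapter = old.adaptToNewApi(&adapter_buffer);
    const r: *std.Io.Reader = &adapter.new_interface;
    // On error.ReadFailed, the underlying error is recorded in adapter.err.
    _ = try r.streamRemaining(out);
}
```

Note that the fixed `Adapter.stream` now also calls `w.advance(n)`, so the bytes actually land in the destination writer instead of only being counted.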
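
Rebasing also moves behind the `Reader` vtable in this merge: the old infallible `rebase()`/`rebaseCapacity()` pair is replaced by a fallible `rebase(capacity)` that dispatches to `vtable.rebase`, and `fixed` installs `endingRebase` so that a rebase which cannot fit reports `error.EndOfStream` rather than `@memmove`-ing the const-cast buffer. A behavioral sketch, assuming `fixed` pre-buffers the whole slice (`seek = 0`, `end = buffer.len`):

```zig
const std = @import("std");

test "rebase on a fixed reader ends the stream" {
    var r: std.Io.Reader = .fixed("abc");
    // A request that already fits in unused capacity is a no-op; with a
    // fully buffered fixed reader, only capacity 0 trivially fits.
    try r.rebase(0);
    // Nothing more can ever be buffered, so asking for real capacity must
    // fail instead of mutating the const buffer.
    try std.testing.expectError(error.EndOfStream, r.rebase(16));
}
```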
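
`streamExact` gains two siblings here: `streamExact64` for 64-bit counts, and `streamExactPreserve`, which pumps exactly `n` bytes into `w` while guaranteeing the last `preserve_len` of them stay buffered in `w`. A happy-path sketch under the same naming assumptions (everything fits in the writer's buffer, so no drain is triggered):

```zig
const std = @import("std");

test "streamExactPreserve pumps exactly n bytes" {
    var r: std.Io.Reader = .fixed("0123456789");
    var buf: [8]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    // Pump 8 of the 10 bytes; the last 4 of them must remain buffered in
    // `w` (trivially true here, since nothing drained).
    try r.streamExactPreserve(&w, 4, 8);
    try std.testing.expectEqualStrings("01234567", w.buffered());
}
```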
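
On the `Writer` side, the `preserve_length` parameters are renamed to `preserve_len`, and two variants are added: `writableSlicePreserve` and `splatBytePreserve`. The contract is the same throughout: whenever the buffer must drain, at least the last `preserve_len` buffered bytes survive the drain. A small sketch using only signatures visible in the Writer.zig hunk:

```zig
const std = @import("std");

test "preserve_len keeps the tail buffered" {
    var buf: [32]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try w.writeAllPreserve(4, "hello");
    try w.splatBytePreserve(4, '!', 3);
    // Nothing drained here (everything fits), so the whole string is still
    // buffered; after a drain, at least the tail "o!!!" would have remained.
    try std.testing.expectEqualStrings("hello!!!", w.buffered());
}
```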
