Diffstat (limited to 'lib/std/Io/Reader.zig')
| -rw-r--r-- | lib/std/Io/Reader.zig | 140 |
1 file changed, 70 insertions, 70 deletions
diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig
index f25e113522..da9e01dd2c 100644
--- a/lib/std/Io/Reader.zig
+++ b/lib/std/Io/Reader.zig
@@ -67,6 +67,18 @@ pub const VTable = struct {
     ///
     /// This function is only called when `buffer` is empty.
     discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
+
+    /// Ensures `capacity` more data can be buffered without rebasing.
+    ///
+    /// Asserts `capacity` is within buffer capacity, or that the stream ends
+    /// within `capacity` bytes.
+    ///
+    /// Only called when `capacity` cannot fit into the unused capacity of
+    /// `buffer`.
+    ///
+    /// The default implementation moves buffered data to the start of
+    /// `buffer`, setting `seek` to zero, and cannot fail.
+    rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase,
 };
 
 pub const StreamError = error{
@@ -97,6 +109,10 @@ pub const ShortError = error{
     ReadFailed,
 };
 
+pub const RebaseError = error{
+    EndOfStream,
+};
+
 pub const failing: Reader = .{
     .vtable = &.{
         .stream = failingStream,
@@ -122,6 +138,7 @@ pub fn fixed(buffer: []const u8) Reader {
         .vtable = &.{
            .stream = endingStream,
            .discard = endingDiscard,
+           .rebase = endingRebase,
         },
         // This cast is safe because all potential writes to it will instead
         // return `error.EndOfStream`.
@@ -179,6 +196,38 @@ pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
     while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
 }
 
+/// "Pump" exactly `n` bytes from the reader to the writer.
+pub fn streamExact64(r: *Reader, w: *Writer, n: u64) StreamError!void {
+    var remaining = n;
+    while (remaining != 0) remaining -= try r.stream(w, .limited64(remaining));
+}
+
+/// "Pump" exactly `n` bytes from the reader to the writer.
+///
+/// When draining `w`, ensures that at least `preserve_len` bytes remain
+/// buffered.
+///
+/// Asserts `Writer.buffer` capacity exceeds `preserve_len`.
+pub fn streamExactPreserve(r: *Reader, w: *Writer, preserve_len: usize, n: usize) StreamError!void {
+    if (w.end + n <= w.buffer.len) {
+        @branchHint(.likely);
+        return streamExact(r, w, n);
+    }
+    // If `n` is large, we can ignore `preserve_len` up to a point.
+    var remaining = n;
+    while (remaining > preserve_len) {
+        assert(remaining != 0);
+        remaining -= try r.stream(w, .limited(remaining - preserve_len));
+        if (w.end + remaining <= w.buffer.len) return streamExact(r, w, remaining);
+    }
+    // All the next bytes received must be preserved.
+    if (preserve_len < w.end) {
+        @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
+        w.end = preserve_len;
+    }
+    return streamExact(r, w, remaining);
+}
+
 /// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
 /// a success case.
 ///
@@ -234,7 +283,7 @@ pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocErro
 /// such case, the next byte that would be read will be the first one to exceed
 /// `limit`, and all preceeding bytes have been appended to `list`.
 ///
-/// Asserts `buffer` has nonzero capacity.
+/// If `limit` is not `Limit.unlimited`, asserts `buffer` has nonzero capacity.
 ///
 /// See also:
 /// * `allocRemaining`
@@ -245,7 +294,7 @@ pub fn appendRemaining(
     list: *std.ArrayListAlignedUnmanaged(u8, alignment),
     limit: Limit,
 ) LimitedAllocError!void {
-    assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
+    if (limit != .unlimited) assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
     const buffer_contents = r.buffer[r.seek..r.end];
     const copy_len = limit.minInt(buffer_contents.len);
     try list.appendSlice(gpa, r.buffer[0..copy_len]);
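
The new `streamExactPreserve` pumps bytes while keeping the tail of the writer's buffer resident. A minimal usage sketch, assuming a decompressor-style caller; `copyFrame`, `window_len`, and `frame_len` are illustrative names, not part of this commit:

const std = @import("std");

/// Copy `frame_len` bytes from `src` to `dst` while guaranteeing that the
/// last `window_len` bytes written remain buffered in `dst`, e.g. so a
/// decompressor can resolve back-references into recently written output.
fn copyFrame(src: *std.Io.Reader, dst: *std.Io.Writer, window_len: usize, frame_len: usize) !void {
    // Per the doc comment above: asserts dst's buffer capacity exceeds window_len.
    try src.streamExactPreserve(dst, window_len, frame_len);
}
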
@@ -748,11 +797,8 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
         @branchHint(.likely);
         return buffer[seek .. end + 1];
     }
-    if (r.vtable.stream == &endingStream) {
-        // Protect the `@constCast` of `fixed`.
-        return error.EndOfStream;
-    }
-    r.rebase();
+    // TODO take a parameter for max search length rather than relying on buffer capacity
+    try rebase(r, r.buffer.len);
     while (r.buffer.len - r.end != 0) {
         const end_cap = r.buffer[r.end..];
         var writer: Writer = .fixed(end_cap);
@@ -1018,11 +1064,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
         };
         if (r.seek + n <= r.end) return;
     };
-    if (r.vtable.stream == &endingStream) {
-        // Protect the `@constCast` of `fixed`.
-        return error.EndOfStream;
-    }
-    rebaseCapacity(r, n);
+    try rebase(r, n);
     var writer: Writer = .{
         .buffer = r.buffer,
         .vtable = &.{ .drain = Writer.fixedDrain },
@@ -1042,7 +1084,7 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
 ///
 /// Asserts buffer capacity is at least 1.
 pub fn fillMore(r: *Reader) Error!void {
-    rebaseCapacity(r, 1);
+    try rebase(r, 1);
     var writer: Writer = .{
         .buffer = r.buffer,
         .end = r.end,
@@ -1219,7 +1261,7 @@ pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
 
 pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void {
     if (n <= r.buffer.len) return;
-    if (r.seek > 0) rebase(r);
+    if (r.seek > 0) rebase(r, r.buffer.len);
     var list: ArrayList(u8) = .{
         .items = r.buffer[0..r.end],
         .capacity = r.buffer.len,
@@ -1235,37 +1277,6 @@ pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void
     return fill(r, n);
 }
 
-/// Returns a slice into the unused capacity of `buffer` with at least
-/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
-///
-/// After calling this function, typically the caller will follow up with a
-/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
-pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
-    {
-        const unused = r.buffer[r.end..];
-        if (unused.len >= min_len) return unused;
-    }
-    if (r.seek > 0) rebase(r);
-    {
-        var list: ArrayList(u8) = .{
-            .items = r.buffer[0..r.end],
-            .capacity = r.buffer.len,
-        };
-        defer r.buffer = list.allocatedSlice();
-        try list.ensureUnusedCapacity(allocator, min_len);
-    }
-    const unused = r.buffer[r.end..];
-    assert(unused.len >= min_len);
-    return unused;
-}
-
-/// After writing directly into the unused capacity of `buffer`, this function
-/// updates `end` so that users of `Reader` can receive the data.
-pub fn advanceBufferEnd(r: *Reader, n: usize) void {
-    assert(n <= r.buffer.len - r.end);
-    r.end += n;
-}
-
 fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
     const result_info = @typeInfo(Result).int;
     comptime assert(result_info.bits % 7 == 0);
@@ -1296,37 +1307,20 @@ fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Resu
     }
 }
 
-/// Left-aligns data such that `r.seek` becomes zero.
-///
-/// If `r.seek` is not already zero then `buffer` is mutated, making it illegal
-/// to call this function with a const-casted `buffer`, such as in the case of
-/// `fixed`. This issue can be avoided:
-/// * in implementations, by attempting a read before a rebase, in which
-///   case the read will return `error.EndOfStream`, preventing the rebase.
-/// * in usage, by copying into a mutable buffer before initializing `fixed`.
-pub fn rebase(r: *Reader) void {
-    if (r.seek == 0) return;
+/// Ensures `capacity` more data can be buffered without rebasing.
+pub fn rebase(r: *Reader, capacity: usize) RebaseError!void {
+    if (r.end + capacity <= r.buffer.len) return;
+    return r.vtable.rebase(r, capacity);
+}
+
+pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void {
+    if (r.end <= r.buffer.len - capacity) return;
     const data = r.buffer[r.seek..r.end];
     @memmove(r.buffer[0..data.len], data);
     r.seek = 0;
     r.end = data.len;
 }
 
-/// Ensures `capacity` more data can be buffered without rebasing, by rebasing
-/// if necessary.
-///
-/// Asserts `capacity` is within the buffer capacity.
-///
-/// If the rebase occurs then `buffer` is mutated, making it illegal to call
-/// this function with a const-casted `buffer`, such as in the case of `fixed`.
-/// This issue can be avoided:
-/// * in implementations, by attempting a read before a rebase, in which
-///   case the read will return `error.EndOfStream`, preventing the rebase.
-/// * in usage, by copying into a mutable buffer before initializing `fixed`.
-pub fn rebaseCapacity(r: *Reader, capacity: usize) void {
-    if (r.end > r.buffer.len - capacity) rebase(r);
-}
-
 /// Advances the stream and decreases the size of the storage buffer by `n`,
 /// returning the range of bytes no longer accessible by `r`.
 ///
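
With `rebase` now a fallible vtable entry, an implementation whose buffer aliases read-only memory can refuse to slide data, instead of relying on the old identity check against `endingStream`. A minimal sketch under that assumption; `emptyStream`, `refuseRebase`, and `vtable` are illustrative names, not part of this commit:

const std = @import("std");
const Reader = std.Io.Reader;
const Writer = std.Io.Writer;
const Limit = std.Io.Limit;

// A stream implementation for an always-empty source, analogous to
// `endingStream`.
fn emptyStream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
    _ = r;
    _ = w;
    _ = limit;
    return error.EndOfStream;
}

// Refuse to slide buffered data, mirroring the `endingRebase` that
// `fixed` installs for its const-casted buffer.
fn refuseRebase(r: *Reader, capacity: usize) Reader.RebaseError!void {
    _ = r;
    _ = capacity;
    return error.EndOfStream;
}

const vtable: Reader.VTable = .{
    .stream = emptyStream,
    .rebase = refuseRebase, // readers owning a mutable buffer can keep `defaultRebase`
};
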
@@ -1682,6 +1676,12 @@ fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
     return error.EndOfStream;
 }
 
+fn endingRebase(r: *Reader, capacity: usize) RebaseError!void {
+    _ = r;
+    _ = capacity;
+    return error.EndOfStream;
+}
+
 fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
     _ = r;
     _ = w;
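
One observable effect of routing `fixed` through `endingRebase`: operations that previously special-cased `endingStream` now surface `error.EndOfStream` through the vtable. A hedged sketch of that behavior, inferred from the hunks above rather than taken from this commit's tests:

const std = @import("std");

test "fixed reader reports EndOfStream instead of rebasing" {
    var r: std.Io.Reader = .fixed("abc");
    // No newline in the fixed buffer: the delimiter search exhausts the
    // buffered data, and the rebase path returns error.EndOfStream via
    // endingRebase rather than mutating the const-casted buffer.
    try std.testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
}
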
