aboutsummaryrefslogtreecommitdiff
path: root/lib/std
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2025-10-08 19:14:01 -0700
committerAndrew Kelley <andrew@ziglang.org>2025-10-08 19:30:36 -0700
commitd83d79c032cf72d26a20068c9f5172b8dba56df8 (patch)
tree8e5418be95c2c2a40015d3eb71841c7d6ac4380e /lib/std
parentb9f8b6ef0649f4bbbd9e24c51ec65a91a8bc5e67 (diff)
downloadzig-d83d79c032cf72d26a20068c9f5172b8dba56df8.tar.gz
zig-d83d79c032cf72d26a20068c9f5172b8dba56df8.zip
std.Io.Reader: rework peekDelimiterInclusive
Now it's based on calling fillMore rather than an illegally aliased stream into the Reader buffer. This commit also includes a disambiguation block inspired by #25162. If `StreamTooLong` were added to `RebaseError` then this logic could be replaced by removing the exit condition from the while loop. That error code would represent when `buffer` capacity is too small for an operation, replacing the current use of asserts.
Diffstat (limited to 'lib/std')
-rw-r--r--lib/std/Io/Reader.zig47
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig
index 28926d7d31..e2344e959e 100644
--- a/lib/std/Io/Reader.zig
+++ b/lib/std/Io/Reader.zig
@@ -792,28 +792,37 @@ pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
- const buffer = r.buffer[0..r.end];
- const seek = r.seek;
- if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |delimiter_index| {
- @branchHint(.likely);
- return buffer[seek .. delimiter_index + 1];
+ {
+ const contents = r.buffer[0..r.end];
+ const seek = r.seek;
+ if (std.mem.findScalarPos(u8, contents, seek, delimiter)) |end| {
+ @branchHint(.likely);
+ return contents[seek .. end + 1];
+ }
}
- // TODO take a parameter for max search length rather than relying on buffer capacity
- try rebase(r, r.buffer.len);
- while (r.buffer.len - r.end != 0) {
- const existing_buffered_len = r.end - r.seek;
- const end_cap = r.buffer[r.end..];
- var writer: Writer = .fixed(end_cap);
- const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
- error.WriteFailed => unreachable,
- else => |e| return e,
- };
- r.end += n;
- if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], r.seek + existing_buffered_len, delimiter)) |delimiter_index| {
- return r.buffer[r.seek .. delimiter_index + 1];
+ while (true) {
+ const content_len = r.end - r.seek;
+ if (r.buffer.len - content_len == 0) break;
+ try fillMore(r);
+ const seek = r.seek;
+ const contents = r.buffer[0..r.end];
+ if (std.mem.findScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
+ return contents[seek .. end + 1];
}
}
- return error.StreamTooLong;
+ // It might or might not be end of stream. There is no more buffer space
+ // left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
+ // this logic could be replaced by removing the exit condition from the
+ // above while loop. That error code would represent when `buffer` capacity
+ // is too small for an operation, replacing the current use of asserts.
+ var failing_writer = Writer.failing;
+ while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
+ assert(n == 0);
+ } else |err| switch (err) {
+ error.WriteFailed => return error.StreamTooLong,
+ error.ReadFailed => |e| return e,
+ error.EndOfStream => |e| return e,
+ }
}
/// Returns a slice of the next bytes of buffered data from the stream until