aboutsummaryrefslogtreecommitdiff
path: root/lib/std/http
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2025-08-14 20:34:44 -0700
committerAndrew Kelley <andrew@ziglang.org>2025-08-15 10:44:35 -0700
commit30b41dc51015c1ed8fa4a7c4f2c61e2a6206ff55 (patch)
treeab442ad8ac96dc82a6b03ff94e099f705689abc7 /lib/std/http
parent6d7c6a0f4e4f77e10462c3d8becf4e51fe172ccf (diff)
downloadzig-30b41dc51015c1ed8fa4a7c4f2c61e2a6206ff55.tar.gz
zig-30b41dc51015c1ed8fa4a7c4f2c61e2a6206ff55.zip
std.compress.zstd.Decompress fixes
* std.Io.Reader: appendRemaining no longer supports alignment and has different rules about how exceeding the limit is handled. Fixed bug where it would return success instead of error.StreamTooLong like it was supposed to. * std.Io.Reader: simplify appendRemaining and appendRemainingUnlimited to be implemented based on std.Io.Writer.Allocating * std.Io.Writer: introduce unreachableRebase * std.Io.Writer: remove minimum_unused_capacity from Allocating. maybe that flexibility could have been handy, but let's see if anyone actually needs it. The field is redundant with the superlinear growth of ArrayList capacity. * std.Io.Writer: growingRebase also ensures total capacity on the preserve parameter, making it no longer necessary to do ensureTotalCapacity at the usage site of decompression streams. * std.compress.flate.Decompress: fix rebase not taking into account seek * std.compress.zstd.Decompress: split into "direct" and "indirect" usage patterns depending on whether a buffer is provided to init, matching how flate works. Remove some overzealous asserts that prevented buffer expansion from within rebase implementation. * std.zig: fix readSourceFileToAlloc returning an overaligned slice which was difficult to free correctly. fixes #24608
Diffstat (limited to 'lib/std/http')
-rw-r--r--lib/std/http/test.zig11
1 file changed, 4 insertions, 7 deletions
diff --git a/lib/std/http/test.zig b/lib/std/http/test.zig
index 20c7383650..91a57dbbe2 100644
--- a/lib/std/http/test.zig
+++ b/lib/std/http/test.zig
@@ -149,9 +149,8 @@ test "HTTP server handles a chunked transfer coding request" {
"content-type: text/plain\r\n" ++
"\r\n" ++
"message from server!\n";
- var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
- var stream_reader = stream.reader(&tiny_buffer);
- const response = try stream_reader.interface().allocRemaining(gpa, .limited(expected_response.len));
+ var stream_reader = stream.reader(&.{});
+ const response = try stream_reader.interface().allocRemaining(gpa, .limited(expected_response.len + 1));
defer gpa.free(response);
try expectEqualStrings(expected_response, response);
}
@@ -293,8 +292,7 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
var stream_writer = stream.writer(&.{});
try stream_writer.interface.writeAll(request_bytes);
- var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
- var stream_reader = stream.reader(&tiny_buffer);
+ var stream_reader = stream.reader(&.{});
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);
@@ -364,8 +362,7 @@ test "receiving arbitrary http headers from the client" {
var stream_writer = stream.writer(&.{});
try stream_writer.interface.writeAll(request_bytes);
- var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
- var stream_reader = stream.reader(&tiny_buffer);
+ var stream_reader = stream.reader(&.{});
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);