path: root/lib/std
author    Andrew Kelley <andrew@ziglang.org>  2025-08-29 03:48:45 -0700
committer GitHub <noreply@github.com>         2025-08-29 03:48:45 -0700
commit    4b948e8556b80cbc874415aa7c4bf9ac0027ffed (patch)
tree      ca48e7208aa23a24db82e8521c37a6c2abcd5dc1 /lib/std
parent    640c11171bf8d13776629941f3305cf11c62c1f3 (diff)
parent    43fbc37a490442ffcecf9817877f542251fee664 (diff)
download  zig-4b948e8556b80cbc874415aa7c4bf9ac0027ffed.tar.gz
          zig-4b948e8556b80cbc874415aa7c4bf9ac0027ffed.zip
Merge pull request #25036 from ziglang/GenericWriter
std.Io: delete GenericWriter, AnyWriter, and null_writer
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/Build/Step/CheckObject.zig                    52
-rw-r--r--  lib/std/Io.zig                                       163
-rw-r--r--  lib/std/Io/DeprecatedReader.zig                       98
-rw-r--r--  lib/std/Io/DeprecatedWriter.zig                      114
-rw-r--r--  lib/std/Io/Reader.zig                                 53
-rw-r--r--  lib/std/Io/Reader/test.zig                           351
-rw-r--r--  lib/std/Io/Writer.zig                                 34
-rw-r--r--  lib/std/Io/fixed_buffer_stream.zig                    69
-rw-r--r--  lib/std/Thread.zig                                     2
-rw-r--r--  lib/std/array_list.zig                               129
-rw-r--r--  lib/std/base64.zig                                     3
-rw-r--r--  lib/std/crypto/aegis.zig                              12
-rw-r--r--  lib/std/crypto/blake2.zig                             12
-rw-r--r--  lib/std/crypto/blake3.zig                             12
-rw-r--r--  lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig   17
-rw-r--r--  lib/std/crypto/ml_kem.zig                             92
-rw-r--r--  lib/std/crypto/scrypt.zig                             21
-rw-r--r--  lib/std/crypto/sha2.zig                               12
-rw-r--r--  lib/std/crypto/sha3.zig                               60
-rw-r--r--  lib/std/crypto/siphash.zig                            12
-rw-r--r--  lib/std/debug/Dwarf/expression.zig                   201
-rw-r--r--  lib/std/debug/Pdb.zig                                343
-rw-r--r--  lib/std/debug/SelfInfo.zig                            51
-rw-r--r--  lib/std/fs/File.zig                                    8
-rw-r--r--  lib/std/json.zig                                       2
-rw-r--r--  lib/std/leb128.zig                                   103
-rw-r--r--  lib/std/macho.zig                                      2
-rw-r--r--  lib/std/posix/test.zig                                 4
-rw-r--r--  lib/std/tz.zig                                        89
29 files changed, 537 insertions, 1584 deletions
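
Most of the hunks below follow a single migration pattern: code that previously obtained a GenericWriter from std.array_list.Managed(u8) now uses std.Io.Writer.Allocating and passes around the interface pointer &output.writer. A minimal sketch of that pattern (the function name and format string are illustrative, not taken from this commit):

    const std = @import("std");

    fn render(gpa: std.mem.Allocator) ![]u8 {
        // Before: var output = std.array_list.Managed(u8).init(gpa);
        //         const writer = output.writer();
        var output: std.Io.Writer.Allocating = .init(gpa);
        defer output.deinit();
        const writer = &output.writer;
        try writer.print("{s}\x00", .{"example"});
        // Takes ownership of the accumulated bytes; the deferred deinit is then a no-op.
        return output.toOwnedSlice();
    }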
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index 6bbe3307a0..1e321de50a 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -257,7 +257,7 @@ const Check = struct {
fn dumpSection(allocator: Allocator, name: [:0]const u8) Check {
var check = Check.create(allocator, .dump_section);
const off: u32 = @intCast(check.data.items.len);
- check.data.writer().print("{s}\x00", .{name}) catch @panic("OOM");
+ check.data.print("{s}\x00", .{name}) catch @panic("OOM");
check.payload = .{ .dump_section = off };
return check;
}
@@ -1320,7 +1320,8 @@ const MachODumper = struct {
}
bindings.deinit();
}
- try ctx.parseBindInfo(data, &bindings);
+ var data_reader: std.Io.Reader = .fixed(data);
+ try ctx.parseBindInfo(&data_reader, &bindings);
mem.sort(Binding, bindings.items, {}, Binding.lessThan);
for (bindings.items) |binding| {
try writer.print("0x{x} [addend: {d}]", .{ binding.address, binding.addend });
@@ -1335,11 +1336,7 @@ const MachODumper = struct {
}
}
- fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.array_list.Managed(Binding)) !void {
- var stream = std.io.fixedBufferStream(data);
- var creader = std.io.countingReader(stream.reader());
- const reader = creader.reader();
-
+ fn parseBindInfo(ctx: ObjectContext, reader: *std.Io.Reader, bindings: *std.array_list.Managed(Binding)) !void {
var seg_id: ?u8 = null;
var tag: Binding.Tag = .self;
var ordinal: u16 = 0;
@@ -1350,7 +1347,7 @@ const MachODumper = struct {
defer name_buf.deinit();
while (true) {
- const byte = reader.readByte() catch break;
+ const byte = reader.takeByte() catch break;
const opc = byte & macho.BIND_OPCODE_MASK;
const imm = byte & macho.BIND_IMMEDIATE_MASK;
switch (opc) {
@@ -1371,18 +1368,17 @@ const MachODumper = struct {
},
macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => {
seg_id = imm;
- offset = try std.leb.readUleb128(u64, reader);
+ offset = try reader.takeLeb128(u64);
},
macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
name_buf.clearRetainingCapacity();
- try reader.readUntilDelimiterArrayList(&name_buf, 0, std.math.maxInt(u32));
- try name_buf.append(0);
+ try name_buf.appendSlice(try reader.takeDelimiterInclusive(0));
},
macho.BIND_OPCODE_SET_ADDEND_SLEB => {
- addend = try std.leb.readIleb128(i64, reader);
+ addend = try reader.takeLeb128(i64);
},
macho.BIND_OPCODE_ADD_ADDR_ULEB => {
- const x = try std.leb.readUleb128(u64, reader);
+ const x = try reader.takeLeb128(u64);
offset = @intCast(@as(i64, @intCast(offset)) + @as(i64, @bitCast(x)));
},
macho.BIND_OPCODE_DO_BIND,
@@ -1397,14 +1393,14 @@ const MachODumper = struct {
switch (opc) {
macho.BIND_OPCODE_DO_BIND => {},
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB => {
- add_addr = try std.leb.readUleb128(u64, reader);
+ add_addr = try reader.takeLeb128(u64);
},
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED => {
add_addr = imm * @sizeOf(u64);
},
macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB => {
- count = try std.leb.readUleb128(u64, reader);
- skip = try std.leb.readUleb128(u64, reader);
+ count = try reader.takeLeb128(u64);
+ skip = try reader.takeLeb128(u64);
},
else => unreachable,
}
@@ -1621,8 +1617,9 @@ const MachODumper = struct {
var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr };
try ctx.parse();
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.headers => {
@@ -1787,8 +1784,9 @@ const ElfDumper = struct {
try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size });
}
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.archive_symtab => if (ctx.symtab.items.len > 0) {
@@ -1944,8 +1942,9 @@ const ElfDumper = struct {
else => {},
};
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.headers => {
@@ -2398,10 +2397,10 @@ const WasmDumper = struct {
return error.UnsupportedWasmVersion;
}
- var output = std.array_list.Managed(u8).init(gpa);
+ var output: std.Io.Writer.Allocating = .init(gpa);
defer output.deinit();
- parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
- error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
+ parseAndDumpInner(step, check, bytes, &fbs, &output.writer) catch |err| switch (err) {
+ error.EndOfStream => try output.writer.writeAll("\n<UnexpectedEndOfStream>"),
else => |e| return e,
};
return output.toOwnedSlice();
@@ -2412,10 +2411,9 @@ const WasmDumper = struct {
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
- output: *std.array_list.Managed(u8),
+ writer: *std.Io.Writer,
) !void {
const reader = fbs.reader();
- const writer = output.writer();
switch (check.kind) {
.headers => {
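
The parseBindInfo change above is the reader-side counterpart: instead of layering fixedBufferStream and countingReader over a byte slice, the caller builds a fixed std.Io.Reader and the parser consumes it directly with takeByte, takeLeb128, and takeDelimiterInclusive. A sketch of that shape (the function and field names are illustrative):

    const std = @import("std");

    fn parseOne(data: []const u8) !void {
        var reader: std.Io.Reader = .fixed(data);
        const opcode = try reader.takeByte();
        const offset = try reader.takeLeb128(u64);
        // NUL-terminated name; the returned slice includes the delimiter byte.
        const name = try reader.takeDelimiterInclusive(0);
        _ = .{ opcode, offset, name };
    }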
diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index 7cce6397bd..9c91e1159d 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -144,61 +144,6 @@ pub fn GenericReader(
return @errorCast(self.any().readAllAlloc(allocator, max_size));
}
- pub inline fn readUntilDelimiterArrayList(
- self: Self,
- array_list: *std.array_list.Managed(u8),
- delimiter: u8,
- max_size: usize,
- ) (NoEofError || Allocator.Error || error{StreamTooLong})!void {
- return @errorCast(self.any().readUntilDelimiterArrayList(
- array_list,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiterAlloc(
- self: Self,
- allocator: Allocator,
- delimiter: u8,
- max_size: usize,
- ) (NoEofError || Allocator.Error || error{StreamTooLong})![]u8 {
- return @errorCast(self.any().readUntilDelimiterAlloc(
- allocator,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiter(
- self: Self,
- buf: []u8,
- delimiter: u8,
- ) (NoEofError || error{StreamTooLong})![]u8 {
- return @errorCast(self.any().readUntilDelimiter(buf, delimiter));
- }
-
- pub inline fn readUntilDelimiterOrEofAlloc(
- self: Self,
- allocator: Allocator,
- delimiter: u8,
- max_size: usize,
- ) (Error || Allocator.Error || error{StreamTooLong})!?[]u8 {
- return @errorCast(self.any().readUntilDelimiterOrEofAlloc(
- allocator,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiterOrEof(
- self: Self,
- buf: []u8,
- delimiter: u8,
- ) (Error || error{StreamTooLong})!?[]u8 {
- return @errorCast(self.any().readUntilDelimiterOrEof(buf, delimiter));
- }
-
pub inline fn streamUntilDelimiter(
self: Self,
writer: anytype,
@@ -326,103 +271,8 @@ pub fn GenericReader(
};
}
-/// Deprecated in favor of `Writer`.
-pub fn GenericWriter(
- comptime Context: type,
- comptime WriteError: type,
- comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize,
-) type {
- return struct {
- context: Context,
-
- const Self = @This();
- pub const Error = WriteError;
-
- pub inline fn write(self: Self, bytes: []const u8) Error!usize {
- return writeFn(self.context, bytes);
- }
-
- pub inline fn writeAll(self: Self, bytes: []const u8) Error!void {
- return @errorCast(self.any().writeAll(bytes));
- }
-
- pub inline fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
- return @errorCast(self.any().print(format, args));
- }
-
- pub inline fn writeByte(self: Self, byte: u8) Error!void {
- return @errorCast(self.any().writeByte(byte));
- }
-
- pub inline fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
- return @errorCast(self.any().writeByteNTimes(byte, n));
- }
-
- pub inline fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) Error!void {
- return @errorCast(self.any().writeBytesNTimes(bytes, n));
- }
-
- pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
- return @errorCast(self.any().writeInt(T, value, endian));
- }
-
- pub inline fn writeStruct(self: Self, value: anytype) Error!void {
- return @errorCast(self.any().writeStruct(value));
- }
-
- pub inline fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) Error!void {
- return @errorCast(self.any().writeStructEndian(value, endian));
- }
-
- pub inline fn any(self: *const Self) AnyWriter {
- return .{
- .context = @ptrCast(&self.context),
- .writeFn = typeErasedWriteFn,
- };
- }
-
- fn typeErasedWriteFn(context: *const anyopaque, bytes: []const u8) anyerror!usize {
- const ptr: *const Context = @ptrCast(@alignCast(context));
- return writeFn(ptr.*, bytes);
- }
-
- /// Helper for bridging to the new `Writer` API while upgrading.
- pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
- return .{
- .derp_writer = self.*,
- .new_interface = .{
- .buffer = buffer,
- .vtable = &.{ .drain = Adapter.drain },
- },
- };
- }
-
- pub const Adapter = struct {
- derp_writer: Self,
- new_interface: Writer,
- err: ?Error = null,
-
- fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
- _ = splat;
- const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
- const buffered = w.buffered();
- if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
- a.err = err;
- return error.WriteFailed;
- });
- return a.derp_writer.write(data[0]) catch |err| {
- a.err = err;
- return error.WriteFailed;
- };
- }
- };
- };
-}
-
/// Deprecated in favor of `Reader`.
pub const AnyReader = @import("Io/DeprecatedReader.zig");
-/// Deprecated in favor of `Writer`.
-pub const AnyWriter = @import("Io/DeprecatedWriter.zig");
/// Deprecated in favor of `Reader`.
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
/// Deprecated in favor of `Reader`.
@@ -434,19 +284,6 @@ pub const countingReader = @import("Io/counting_reader.zig").countingReader;
pub const tty = @import("Io/tty.zig");
-/// Deprecated in favor of `Writer.Discarding`.
-pub const null_writer: NullWriter = .{ .context = {} };
-/// Deprecated in favor of `Writer.Discarding`.
-pub const NullWriter = GenericWriter(void, error{}, dummyWrite);
-fn dummyWrite(context: void, data: []const u8) error{}!usize {
- _ = context;
- return data.len;
-}
-
-test null_writer {
- null_writer.writeAll("yay" ** 10) catch |err| switch (err) {};
-}
-
pub fn poll(
gpa: Allocator,
comptime StreamEnum: type,
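
With null_writer removed, the doc comments point to Writer.Discarding, which throws away the bytes but keeps a running count. A small sketch, assuming the same Discarding API used by the scrypt.zig change further down:

    const std = @import("std");

    test "discarding writer" {
        var trash: [64]u8 = undefined;
        var discarding: std.Io.Writer.Discarding = .init(&trash);
        try discarding.writer.writeAll("yay" ** 10);
        // fullCount includes bytes still sitting in the buffer.
        try std.testing.expect(discarding.fullCount() == 30);
    }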
diff --git a/lib/std/Io/DeprecatedReader.zig b/lib/std/Io/DeprecatedReader.zig
index 4dfbdd19ee..0505c6be66 100644
--- a/lib/std/Io/DeprecatedReader.zig
+++ b/lib/std/Io/DeprecatedReader.zig
@@ -93,100 +93,6 @@ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyer
return try array_list.toOwnedSlice();
}
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Replaces the `std.array_list.Managed` contents by reading from the stream until `delimiter` is found.
-/// Does not include the delimiter in the result.
-/// If the `std.array_list.Managed` length would exceed `max_size`, `error.StreamTooLong` is returned and the
-/// `std.array_list.Managed` is populated with `max_size` bytes from the stream.
-pub fn readUntilDelimiterArrayList(
- self: Self,
- array_list: *std.array_list.Managed(u8),
- delimiter: u8,
- max_size: usize,
-) anyerror!void {
- array_list.shrinkRetainingCapacity(0);
- try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Allocates enough memory to read until `delimiter`. If the allocated
-/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterAlloc(
- self: Self,
- allocator: mem.Allocator,
- delimiter: u8,
- max_size: usize,
-) anyerror![]u8 {
- var array_list = std.array_list.Managed(u8).init(allocator);
- defer array_list.deinit();
- try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
- return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, `error.EndOfStream` is returned.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
- var fbs = std.io.fixedBufferStream(buf);
- try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
- const output = fbs.getWritten();
- buf[output.len] = delimiter; // emulating old behaviour
- return output;
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
-/// Allocates enough memory to read until `delimiter` or end-of-stream.
-/// If the allocated memory would be greater than `max_size`, returns
-/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
-/// of the stream. If this function is called again after that, returns
-/// null.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterOrEofAlloc(
- self: Self,
- allocator: mem.Allocator,
- delimiter: u8,
- max_size: usize,
-) anyerror!?[]u8 {
- var array_list = std.array_list.Managed(u8).init(allocator);
- defer array_list.deinit();
- self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
- error.EndOfStream => if (array_list.items.len == 0) {
- return null;
- },
- else => |e| return e,
- };
- return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, returns the rest of the stream. If this
-/// function is called again after that, returns null.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
- var fbs = std.io.fixedBufferStream(buf);
- self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
- error.EndOfStream => if (fbs.getWritten().len == 0) {
- return null;
- },
-
- else => |e| return e,
- };
- const output = fbs.getWritten();
- buf[output.len] = delimiter; // emulating old behaviour
- return output;
-}
-
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
@@ -384,7 +290,3 @@ const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;
-
-test {
- _ = @import("Reader/test.zig");
-}
diff --git a/lib/std/Io/DeprecatedWriter.zig b/lib/std/Io/DeprecatedWriter.zig
deleted file mode 100644
index 68b21bde5b..0000000000
--- a/lib/std/Io/DeprecatedWriter.zig
+++ /dev/null
@@ -1,114 +0,0 @@
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const mem = std.mem;
-const native_endian = @import("builtin").target.cpu.arch.endian();
-
-context: *const anyopaque,
-writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
-
-const Self = @This();
-pub const Error = anyerror;
-
-pub fn write(self: Self, bytes: []const u8) anyerror!usize {
- return self.writeFn(self.context, bytes);
-}
-
-pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
- var index: usize = 0;
- while (index != bytes.len) {
- index += try self.write(bytes[index..]);
- }
-}
-
-pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
- return std.fmt.format(self, format, args);
-}
-
-pub fn writeByte(self: Self, byte: u8) anyerror!void {
- const array = [1]u8{byte};
- return self.writeAll(&array);
-}
-
-pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
- var bytes: [256]u8 = undefined;
- @memset(bytes[0..], byte);
-
- var remaining: usize = n;
- while (remaining > 0) {
- const to_write = @min(remaining, bytes.len);
- try self.writeAll(bytes[0..to_write]);
- remaining -= to_write;
- }
-}
-
-pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
- var i: usize = 0;
- while (i < n) : (i += 1) {
- try self.writeAll(bytes);
- }
-}
-
-pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
- var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
- mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
- return self.writeAll(&bytes);
-}
-
-pub fn writeStruct(self: Self, value: anytype) anyerror!void {
- // Only extern and packed structs have defined in-memory layout.
- comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
- return self.writeAll(mem.asBytes(&value));
-}
-
-pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
- // TODO: make sure this value is not a reference type
- if (native_endian == endian) {
- return self.writeStruct(value);
- } else {
- var copy = value;
- mem.byteSwapAllFields(@TypeOf(value), &copy);
- return self.writeStruct(copy);
- }
-}
-
-pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
- // TODO: figure out how to adjust std lib abstractions so that this ends up
- // doing sendfile or maybe even copy_file_range under the right conditions.
- var buf: [4000]u8 = undefined;
- while (true) {
- const n = try file.readAll(&buf);
- try self.writeAll(buf[0..n]);
- if (n < buf.len) return;
- }
-}
-
-/// Helper for bridging to the new `Writer` API while upgrading.
-pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
- return .{
- .derp_writer = self.*,
- .new_interface = .{
- .buffer = buffer,
- .vtable = &.{ .drain = Adapter.drain },
- },
- };
-}
-
-pub const Adapter = struct {
- derp_writer: Self,
- new_interface: std.io.Writer,
- err: ?Error = null,
-
- fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
- _ = splat;
- const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
- const buffered = w.buffered();
- if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
- a.err = err;
- return error.WriteFailed;
- });
- return a.derp_writer.write(data[0]) catch |err| {
- a.err = err;
- return error.WriteFailed;
- };
- }
-};
diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig
index f6cb22cb4b..4cad73e8ce 100644
--- a/lib/std/Io/Reader.zig
+++ b/lib/std/Io/Reader.zig
@@ -143,8 +143,8 @@ pub const failing: Reader = .{
/// This is generally safe to `@constCast` because it has an empty buffer, so
/// there is not really a way to accidentally attempt mutation of these fields.
-const ending_state: Reader = .fixed(&.{});
-pub const ending: *Reader = @constCast(&ending_state);
+pub const ending_instance: Reader = .fixed(&.{});
+pub const ending: *Reader = @constCast(&ending_instance);
pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
return .init(r, limit, buffer);
@@ -784,7 +784,7 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
}
/// Returns a slice of the next bytes of buffered data from the stream until
-/// `delimiter` is found, advancing the seek position.
+/// `delimiter` is found, advancing the seek position up to the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
@@ -815,6 +815,37 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
}
/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, advancing the seek position past the delimiter.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
+/// to a delimiter, unless it would result in a length 0 return value, in which
+/// case `null` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
+/// such case, the stream state is unmodified as if this function was never
+/// called.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `takeDelimiterInclusive`
+/// * `takeDelimiterExclusive`
+pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
+ const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+ error.EndOfStream => {
+ const remaining = r.buffer[r.seek..r.end];
+ if (remaining.len == 0) return null;
+ r.toss(remaining.len);
+ return remaining;
+ },
+ else => |e| return e,
+ };
+ r.toss(result.len + 1);
+ return result[0 .. result.len - 1];
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
@@ -846,6 +877,8 @@ pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Appends to `w` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
+/// Does not discard the delimiter from the `Reader`.
+///
/// Returns number of bytes streamed, which may be zero, or error.EndOfStream
/// if the delimiter was not found.
///
@@ -899,6 +932,8 @@ pub const StreamDelimiterLimitError = error{
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
+/// Does not discard the delimiter from the `Reader`.
+///
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
@@ -1128,7 +1163,11 @@ pub inline fn takeStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
.@"struct" => |info| switch (info.layout) {
.auto => @compileError("ill-defined memory layout"),
.@"extern" => {
- var res = (try r.takeStructPointer(T)).*;
+ // This code works around https://github.com/ziglang/zig/issues/25067
+ // by avoiding a call to `peekStructPointer`.
+ const struct_bytes = try r.takeArray(@sizeOf(T));
+ var res: T = undefined;
+ @memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
return res;
},
@@ -1153,7 +1192,11 @@ pub inline fn peekStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
.@"struct" => |info| switch (info.layout) {
.auto => @compileError("ill-defined memory layout"),
.@"extern" => {
- var res = (try r.peekStructPointer(T)).*;
+ // This code works around https://github.com/ziglang/zig/issues/25067
+ // by avoiding a call to `peekStructPointer`.
+ const struct_bytes = try r.peekArray(@sizeOf(T));
+ var res: T = undefined;
+ @memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
return res;
},
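
The new takeDelimiter added above folds the inclusive/exclusive variants into an iterator-friendly form: it returns the bytes before the delimiter, consumes the delimiter, and yields null once the stream is exhausted. A sketch of line iteration under that contract, using a fixed in-memory reader:

    const std = @import("std");

    test "takeDelimiter line iteration" {
        var r: std.Io.Reader = .fixed("one\ntwo\nthree");
        var lines: usize = 0;
        while (try r.takeDelimiter('\n')) |line| {
            try std.testing.expect(line.len > 0);
            lines += 1;
        }
        try std.testing.expect(lines == 3);
    }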
diff --git a/lib/std/Io/Reader/test.zig b/lib/std/Io/Reader/test.zig
deleted file mode 100644
index 90f9539ae8..0000000000
--- a/lib/std/Io/Reader/test.zig
+++ /dev/null
@@ -1,351 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("../../std.zig");
-const testing = std.testing;
-
-test "Reader" {
- var buf = "a\x02".*;
- var fis = std.io.fixedBufferStream(&buf);
- const reader = fis.reader();
- try testing.expect((try reader.readByte()) == 'a');
- try testing.expect((try reader.readEnum(enum(u8) {
- a = 0,
- b = 99,
- c = 2,
- d = 3,
- }, builtin.cpu.arch.endian())) == .c);
- try testing.expectError(error.EndOfStream, reader.readByte());
-}
-
-test "isBytes" {
- var fis = std.io.fixedBufferStream("foobar");
- const reader = fis.reader();
- try testing.expectEqual(true, try reader.isBytes("foo"));
- try testing.expectEqual(false, try reader.isBytes("qux"));
-}
-
-test "skipBytes" {
- var fis = std.io.fixedBufferStream("foobar");
- const reader = fis.reader();
- try reader.skipBytes(3, .{});
- try testing.expect(try reader.isBytes("bar"));
- try reader.skipBytes(0, .{});
- try testing.expectError(error.EndOfStream, reader.skipBytes(1, .{}));
-}
-
-test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("0000", list.items);
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("1234", list.items);
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
-}
-
-test "readUntilDelimiterArrayList returns an empty ArrayList" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("", list.items);
-}
-
-test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterArrayList(&list, '\n', 5));
- try std.testing.expectEqualStrings("12345", list.items);
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("67", list.items);
-}
-
-test "readUntilDelimiterArrayList returns EndOfStream" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
- try std.testing.expectEqualStrings("1234", list.items);
-}
-
-test "readUntilDelimiterAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("0000", result);
- }
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("1234", result);
- }
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
-}
-
-test "readUntilDelimiterAlloc returns an empty ArrayList" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("", result);
- }
-}
-
-test "readUntilDelimiterAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterAlloc(a, '\n', 5));
-
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("67", result);
-}
-
-test "readUntilDelimiterAlloc returns EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
-}
-
-test "readUntilDelimiter returns bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("0000", try reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("67", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns EndOfStream" {
- {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("");
- const reader = fis.reader();
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
- }
- {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
- }
-}
-
-test "readUntilDelimiter returns bytes read until delimiter, then EndOfStream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then EndOfStream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter writes all bytes read to the output buffer" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n12345");
- const reader = fis.reader();
- _ = try reader.readUntilDelimiter(&buf, '\n');
- try std.testing.expectEqualStrings("0000\n", &buf);
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("12345", &buf);
-}
-
-test "readUntilDelimiterOrEofAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("0000", result);
- }
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("1234", result);
- }
-
- try std.testing.expect((try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)) == null);
-}
-
-test "readUntilDelimiterOrEofAlloc returns an empty ArrayList" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("", result);
- }
-}
-
-test "readUntilDelimiterOrEofAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEofAlloc(a, '\n', 5));
-
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("67", result);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("0000", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns null" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("");
- const reader = fis.reader();
- try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until delimiter, then null" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
- try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until end-of-stream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until end-of-stream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof writes all bytes read to the output buffer" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n12345");
- const reader = fis.reader();
- _ = try reader.readUntilDelimiterOrEof(&buf, '\n');
- try std.testing.expectEqualStrings("0000\n", &buf);
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("12345", &buf);
-}
-
-test "streamUntilDelimiter writes all bytes without delimiter to the output" {
- const input_string = "some_string_with_delimiter!";
- var input_fbs = std.io.fixedBufferStream(input_string);
- const reader = input_fbs.reader();
-
- var output: [input_string.len]u8 = undefined;
- var output_fbs = std.io.fixedBufferStream(&output);
- const writer = output_fbs.writer();
-
- try reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len);
- try std.testing.expectEqualStrings("some_string_with_delimiter", output_fbs.getWritten());
- try std.testing.expectError(error.EndOfStream, reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len));
-
- input_fbs.reset();
- output_fbs.reset();
-
- try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
-}
diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig
index 707ed9cb94..384faff62e 100644
--- a/lib/std/Io/Writer.zig
+++ b/lib/std/Io/Writer.zig
@@ -8,6 +8,7 @@ const Limit = std.Io.Limit;
const File = std.fs.File;
const testing = std.testing;
const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
vtable: *const VTable,
/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
@@ -2374,6 +2375,29 @@ pub fn unreachableRebase(w: *Writer, preserve: usize, capacity: usize) Error!voi
unreachable;
}
+pub fn fromArrayList(array_list: *ArrayList(u8)) Writer {
+ defer array_list.* = .empty;
+ return .{
+ .vtable = &.{
+ .drain = fixedDrain,
+ .flush = noopFlush,
+ .rebase = failingRebase,
+ },
+ .buffer = array_list.allocatedSlice(),
+ .end = array_list.items.len,
+ };
+}
+
+pub fn toArrayList(w: *Writer) ArrayList(u8) {
+ const result: ArrayList(u8) = .{
+ .items = w.buffer[0..w.end],
+ .capacity = w.buffer.len,
+ };
+ w.buffer = &.{};
+ w.end = 0;
+ return result;
+}
+
/// Provides a `Writer` implementation based on calling `Hasher.update`, sending
/// all data also to an underlying `Writer`.
///
@@ -2546,7 +2570,7 @@ pub const Allocating = struct {
}
/// Replaces `array_list` with empty, taking ownership of the memory.
- pub fn fromArrayList(allocator: Allocator, array_list: *std.ArrayListUnmanaged(u8)) Allocating {
+ pub fn fromArrayList(allocator: Allocator, array_list: *ArrayList(u8)) Allocating {
defer array_list.* = .empty;
return .{
.allocator = allocator,
@@ -2572,9 +2596,9 @@ pub const Allocating = struct {
/// Returns an array list that takes ownership of the allocated memory.
/// Resets the `Allocating` to an empty state.
- pub fn toArrayList(a: *Allocating) std.ArrayListUnmanaged(u8) {
+ pub fn toArrayList(a: *Allocating) ArrayList(u8) {
const w = &a.writer;
- const result: std.ArrayListUnmanaged(u8) = .{
+ const result: ArrayList(u8) = .{
.items = w.buffer[0..w.end],
.capacity = w.buffer.len,
};
@@ -2603,7 +2627,7 @@ pub const Allocating = struct {
pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
const gpa = a.allocator;
- var list = toArrayList(a);
+ var list = @This().toArrayList(a);
defer a.setArrayList(list);
return list.toOwnedSliceSentinel(gpa, sentinel);
}
@@ -2670,7 +2694,7 @@ pub const Allocating = struct {
list.ensureUnusedCapacity(gpa, minimum_len) catch return error.WriteFailed;
}
- fn setArrayList(a: *Allocating, list: std.ArrayListUnmanaged(u8)) void {
+ fn setArrayList(a: *Allocating, list: ArrayList(u8)) void {
a.writer.buffer = list.allocatedSlice();
a.writer.end = list.items.len;
}
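
Writer.zig also gains free functions fromArrayList and toArrayList mirroring the methods on Allocating, so an unmanaged ArrayList(u8) can be viewed as a fixed-capacity Writer and then handed back. A sketch under the assumption that the list already has enough reserved capacity, since this view does not grow the buffer:

    const std = @import("std");

    test "ArrayList round-trip through Writer" {
        const gpa = std.testing.allocator;
        var list: std.ArrayList(u8) = .empty;
        try list.ensureTotalCapacity(gpa, 64);
        var w = std.Io.Writer.fromArrayList(&list); // resets `list` to .empty
        try w.writeAll("hello");
        list = w.toArrayList(); // reclaim the memory as a list again
        defer list.deinit(gpa);
        try std.testing.expectEqualStrings("hello", list.items);
    }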
diff --git a/lib/std/Io/fixed_buffer_stream.zig b/lib/std/Io/fixed_buffer_stream.zig
index c284b9baf4..a960f21ce6 100644
--- a/lib/std/Io/fixed_buffer_stream.zig
+++ b/lib/std/Io/fixed_buffer_stream.zig
@@ -17,7 +17,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
pub const GetSeekPosError = error{};
pub const Reader = io.GenericReader(*Self, ReadError, read);
- pub const Writer = io.GenericWriter(*Self, WriteError, write);
const Self = @This();
@@ -25,10 +24,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return .{ .context = self };
}
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
pub fn read(self: *Self, dest: []u8) ReadError!usize {
const size = @min(dest.len, self.buffer.len - self.pos);
const end = self.pos + size;
@@ -39,23 +34,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return size;
}
- /// If the returned number of bytes written is less than requested, the
- /// buffer is full. Returns `error.NoSpaceLeft` when no bytes would be written.
- /// Note: `error.NoSpaceLeft` matches the corresponding error from
- /// `std.fs.File.WriteError`.
- pub fn write(self: *Self, bytes: []const u8) WriteError!usize {
- if (bytes.len == 0) return 0;
- if (self.pos >= self.buffer.len) return error.NoSpaceLeft;
-
- const n = @min(self.buffer.len - self.pos, bytes.len);
- @memcpy(self.buffer[self.pos..][0..n], bytes[0..n]);
- self.pos += n;
-
- if (n == 0) return error.NoSpaceLeft;
-
- return n;
- }
-
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
}
@@ -84,10 +62,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return self.pos;
}
- pub fn getWritten(self: Self) Buffer {
- return self.buffer[0..self.pos];
- }
-
pub fn reset(self: *Self) void {
self.pos = 0;
}
@@ -117,49 +91,6 @@ fn Slice(comptime T: type) type {
}
}
-test "output" {
- var buf: [255]u8 = undefined;
- var fbs = fixedBufferStream(&buf);
- const stream = fbs.writer();
-
- try stream.print("{s}{s}!", .{ "Hello", "World" });
- try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
-}
-
-test "output at comptime" {
- comptime {
- var buf: [255]u8 = undefined;
- var fbs = fixedBufferStream(&buf);
- const stream = fbs.writer();
-
- try stream.print("{s}{s}!", .{ "Hello", "World" });
- try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
- }
-}
-
-test "output 2" {
- var buffer: [10]u8 = undefined;
- var fbs = fixedBufferStream(&buffer);
-
- try fbs.writer().writeAll("Hello");
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
-
- try fbs.writer().writeAll("world");
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
-
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
-
- fbs.reset();
- try testing.expect(fbs.getWritten().len == 0);
-
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
-
- try fbs.seekTo((try fbs.getEndPos()) + 1);
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
-}
-
test "input" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
var fbs = fixedBufferStream(&bytes);
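
The deleted writer half of FixedBufferStream is replaced by a fixed std.Io.Writer over a caller-provided buffer, the same construction the scrypt.zig change below uses. A sketch equivalent to the removed "output" test:

    const std = @import("std");

    test "fixed writer output" {
        var buf: [255]u8 = undefined;
        var w: std.Io.Writer = .fixed(&buf);
        try w.print("{s}{s}!", .{ "Hello", "World" });
        try std.testing.expectEqualStrings("HelloWorld!", w.buffered());
    }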
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index e6d1c79d01..5cf6b173fb 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -167,7 +167,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
defer file.close();
- try file.deprecatedWriter().writeAll(name);
+ try file.writeAll(name);
return;
},
.windows => {
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 486deffafd..a0b7614d20 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -336,39 +336,6 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
try unmanaged.print(gpa, fmt, args);
}
- pub const Writer = if (T != u8) void else std.io.GenericWriter(*Self, Allocator.Error, appendWrite);
-
- /// Initializes a Writer which will append to the list.
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
- /// Same as `append` except it returns the number of bytes written, which is always the same
- /// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
- /// Invalidates element pointers if additional memory is needed.
- fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
- try self.appendSlice(m);
- return m.len;
- }
-
- pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
-
- /// Initializes a Writer which will append to the list but will return
- /// `error.OutOfMemory` rather than increasing capacity.
- pub fn fixedWriter(self: *Self) FixedWriter {
- return .{ .context = self };
- }
-
- /// The purpose of this function existing is to match `std.io.GenericWriter` API.
- fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
- const available_capacity = self.capacity - self.items.len;
- if (m.len > available_capacity)
- return error.OutOfMemory;
-
- self.appendSliceAssumeCapacity(m);
- return m.len;
- }
-
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@@ -1083,48 +1050,6 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
self.items.len += w.end;
}
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const WriterContext = struct {
- self: *Self,
- allocator: Allocator,
- };
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const Writer = if (T != u8)
- @compileError("The Writer interface is only defined for ArrayList(u8) " ++
- "but the given type is ArrayList(" ++ @typeName(T) ++ ")")
- else
- std.io.GenericWriter(WriterContext, Allocator.Error, appendWrite);
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub fn writer(self: *Self, gpa: Allocator) Writer {
- return .{ .context = .{ .self = self, .allocator = gpa } };
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
- try context.self.appendSlice(context.allocator, m);
- return m.len;
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub fn fixedWriter(self: *Self) FixedWriter {
- return .{ .context = self };
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
- const available_capacity = self.capacity - self.items.len;
- if (m.len > available_capacity)
- return error.OutOfMemory;
-
- self.appendSliceAssumeCapacity(m);
- return m.len;
- }
-
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@@ -2116,60 +2041,6 @@ test "Managed(T) of struct T" {
}
}
-test "Managed(u8) implements writer" {
- const a = testing.allocator;
-
- {
- var buffer = Managed(u8).init(a);
- defer buffer.deinit();
-
- const x: i32 = 42;
- const y: i32 = 1234;
- try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
-
- try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
- }
- {
- var list = AlignedManaged(u8, .@"2").init(a);
- defer list.deinit();
-
- const writer = list.writer();
- try writer.writeAll("a");
- try writer.writeAll("bc");
- try writer.writeAll("d");
- try writer.writeAll("efg");
-
- try testing.expectEqualSlices(u8, list.items, "abcdefg");
- }
-}
-
-test "ArrayList(u8) implements writer" {
- const a = testing.allocator;
-
- {
- var buffer: ArrayList(u8) = .empty;
- defer buffer.deinit(a);
-
- const x: i32 = 42;
- const y: i32 = 1234;
- try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y });
-
- try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
- }
- {
- var list: Aligned(u8, .@"2") = .empty;
- defer list.deinit(a);
-
- const writer = list.writer(a);
- try writer.writeAll("a");
- try writer.writeAll("bc");
- try writer.writeAll("d");
- try writer.writeAll("efg");
-
- try testing.expectEqualSlices(u8, list.items, "abcdefg");
- }
-}
-
test "shrink still sets length when resizing is disabled" {
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
const a = failing_allocator.allocator();
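
For ArrayList(u8) itself, the removed writer()/fixedWriter() helpers give way to the list's print method (the unmanaged variant takes the allocator) or to Writer.Allocating when a full stream interface is needed. A sketch of the print path, mirroring the deleted test:

    const std = @import("std");

    test "ArrayList print" {
        const gpa = std.testing.allocator;
        var buffer: std.ArrayList(u8) = .empty;
        defer buffer.deinit(gpa);
        try buffer.print(gpa, "x: {}\ny: {}\n", .{ 42, 1234 });
        try std.testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
    }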
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index 15e48b5c51..8c08fd6786 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -108,8 +108,7 @@ pub const Base64Encoder = struct {
}
}
- // dest must be compatible with std.io.GenericWriter's writeAll interface
- pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
+ pub fn encodeWriter(encoder: *const Base64Encoder, dest: *std.Io.Writer, source: []const u8) !void {
var chunker = window(u8, source, 3, 3);
while (chunker.next()) |chunk| {
var temp: [5]u8 = undefined;
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index 1065c4fc21..5c81b9640c 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -801,18 +801,6 @@ fn AegisMac(comptime T: type) type {
ctx.update(msg);
ctx.final(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Mac, Error, write);
-
- fn write(self: *Mac, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Mac) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index cd40978cf3..28754b4cae 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -185,18 +185,6 @@ pub fn Blake2s(comptime out_bits: usize) type {
r.* ^= v[i] ^ v[i + 8];
}
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index bc771524e8..a840a30632 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -474,18 +474,6 @@ pub const Blake3 = struct {
}
output.rootOutputBytes(out_slice);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Blake3, Error, write);
-
- fn write(self: *Blake3, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Blake3) Writer {
- return .{ .context = self };
- }
};
// Use named type declarations to workaround crash with anonymous structs (issue #4373).
diff --git a/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig b/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
index b6c0ab20d4..b761a93345 100644
--- a/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
+++ b/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
@@ -4,6 +4,12 @@
//! Laid out in memory like:
//! capacity |--------------------------|
//! data |-------------|
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const testing = std.testing;
+
data: []u8,
capacity: usize,
allocator: Allocator,
@@ -45,12 +51,6 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
self.data.ptr = begin;
}
-pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
-/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
-pub fn writer(self: *ArrayListReverse) Writer {
- return .{ .context = self };
-}
-
fn prependSliceSize(self: *ArrayListReverse, data: []const u8) Error!usize {
try self.prependSlice(data);
return data.len;
@@ -77,11 +77,6 @@ pub fn toOwnedSlice(self: *ArrayListReverse) Error![]u8 {
return new_memory;
}
-const std = @import("std");
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const testing = std.testing;
-
test ArrayListReverse {
var b = ArrayListReverse.init(testing.allocator);
defer b.deinit();
diff --git a/lib/std/crypto/ml_kem.zig b/lib/std/crypto/ml_kem.zig
index ce3edf9eb5..c7ad23d3e2 100644
--- a/lib/std/crypto/ml_kem.zig
+++ b/lib/std/crypto/ml_kem.zig
@@ -1721,53 +1721,55 @@ test "Test happy flow" {
// Code to test NIST Known Answer Tests (KAT), see PQCgenKAT.c.
-const sha2 = crypto.hash.sha2;
-
-test "NIST KAT test" {
- inline for (.{
- .{ d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547" },
- .{ d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5" },
- .{ d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2" },
- }) |modeHash| {
- const mode = modeHash[0];
- var seed: [48]u8 = undefined;
- for (&seed, 0..) |*s, i| {
- s.* = @as(u8, @intCast(i));
- }
- var f = sha2.Sha256.init(.{});
- const fw = f.writer();
- var g = NistDRBG.init(seed);
- try std.fmt.format(fw, "# {s}\n\n", .{mode.name});
- for (0..100) |i| {
- g.fill(&seed);
- try std.fmt.format(fw, "count = {}\n", .{i});
- try std.fmt.format(fw, "seed = {X}\n", .{&seed});
- var g2 = NistDRBG.init(seed);
-
- // This is not equivalent to g2.fill(kseed[:]). As the reference
- // implementation calls randombytes twice generating the keypair,
- // we have to do that as well.
- var kseed: [64]u8 = undefined;
- var eseed: [32]u8 = undefined;
- g2.fill(kseed[0..32]);
- g2.fill(kseed[32..64]);
- g2.fill(&eseed);
- const kp = try mode.KeyPair.generateDeterministic(kseed);
- const e = kp.public_key.encaps(eseed);
- const ss2 = try kp.secret_key.decaps(&e.ciphertext);
- try testing.expectEqual(ss2, e.shared_secret);
- try std.fmt.format(fw, "pk = {X}\n", .{&kp.public_key.toBytes()});
- try std.fmt.format(fw, "sk = {X}\n", .{&kp.secret_key.toBytes()});
- try std.fmt.format(fw, "ct = {X}\n", .{&e.ciphertext});
- try std.fmt.format(fw, "ss = {X}\n\n", .{&e.shared_secret});
- }
+test "NIST KAT test d00.Kyber512" {
+ try testNistKat(d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547");
+}
- var out: [32]u8 = undefined;
- f.final(&out);
- var outHex: [64]u8 = undefined;
- _ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
- try testing.expectEqual(outHex, modeHash[1].*);
+test "NIST KAT test d00.Kyber1024" {
+ try testNistKat(d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5");
+}
+
+test "NIST KAT test d00.Kyber768" {
+ try testNistKat(d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2");
+}
+
+fn testNistKat(mode: type, hash: []const u8) !void {
+ var seed: [48]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @as(u8, @intCast(i));
}
+ var fw: std.Io.Writer.Hashing(crypto.hash.sha2.Sha256) = .init(&.{});
+ var g = NistDRBG.init(seed);
+ try fw.writer.print("# {s}\n\n", .{mode.name});
+ for (0..100) |i| {
+ g.fill(&seed);
+ try fw.writer.print("count = {}\n", .{i});
+ try fw.writer.print("seed = {X}\n", .{&seed});
+ var g2 = NistDRBG.init(seed);
+
+ // This is not equivalent to g2.fill(kseed[:]). As the reference
+ // implementation calls randombytes twice generating the keypair,
+ // we have to do that as well.
+ var kseed: [64]u8 = undefined;
+ var eseed: [32]u8 = undefined;
+ g2.fill(kseed[0..32]);
+ g2.fill(kseed[32..64]);
+ g2.fill(&eseed);
+ const kp = try mode.KeyPair.generateDeterministic(kseed);
+ const e = kp.public_key.encaps(eseed);
+ const ss2 = try kp.secret_key.decaps(&e.ciphertext);
+ try testing.expectEqual(ss2, e.shared_secret);
+ try fw.writer.print("pk = {X}\n", .{&kp.public_key.toBytes()});
+ try fw.writer.print("sk = {X}\n", .{&kp.secret_key.toBytes()});
+ try fw.writer.print("ct = {X}\n", .{&e.ciphertext});
+ try fw.writer.print("ss = {X}\n\n", .{&e.shared_secret});
+ }
+
+ var out: [32]u8 = undefined;
+ fw.hasher.final(&out);
+ var outHex: [64]u8 = undefined;
+ _ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
+ try testing.expectEqualStrings(&outHex, hash);
}
const NistDRBG = struct {
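Editor's note: the rewritten KAT test above feeds its transcript through std.Io.Writer.Hashing instead of a GenericWriter wrapper around the hasher. A minimal sketch of that pattern follows; it is illustrative only, not part of this commit, and uses only the calls that appear in the hunk above (the test name and literal strings are made up).

const std = @import("std");

test "Writer.Hashing sketch" {
    // An unbuffered hashing writer: everything printed below is fed to SHA-256 directly.
    var hw: std.Io.Writer.Hashing(std.crypto.hash.sha2.Sha256) = .init(&.{});
    try hw.writer.print("count = {}\n", .{@as(usize, 0)});
    try hw.writer.writeAll("seed = 00\n");
    var digest: [32]u8 = undefined;
    hw.hasher.final(&digest);
    // `digest` now holds the SHA-256 of the formatted transcript.
}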
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index f12306a1dc..122da1bb45 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -304,31 +304,34 @@ const crypt_format = struct {
/// Serialize parameters into a string in modular crypt format.
pub fn serialize(params: anytype, str: []u8) EncodingError![]const u8 {
- var buf = io.fixedBufferStream(str);
- try serializeTo(params, buf.writer());
- return buf.getWritten();
+ var w: std.Io.Writer = .fixed(str);
+ serializeTo(params, &w) catch |err| switch (err) {
+ error.WriteFailed => return error.NoSpaceLeft,
+ else => |e| return e,
+ };
+ return w.buffered();
}
/// Compute the number of bytes required to serialize `params`
pub fn calcSize(params: anytype) usize {
var trash: [128]u8 = undefined;
var d: std.Io.Writer.Discarding = .init(&trash);
- serializeTo(params, &d) catch unreachable;
+ serializeTo(params, &d.writer) catch unreachable;
return @intCast(d.fullCount());
}
- fn serializeTo(params: anytype, out: anytype) !void {
+ fn serializeTo(params: anytype, w: *std.Io.Writer) !void {
var header: [14]u8 = undefined;
header[0..3].* = prefix.*;
Codec.intEncode(header[3..4], params.ln);
Codec.intEncode(header[4..9], params.r);
Codec.intEncode(header[9..14], params.p);
- try out.writeAll(&header);
- try out.writeAll(params.salt);
- try out.writeAll("$");
+ try w.writeAll(&header);
+ try w.writeAll(params.salt);
+ try w.writeAll("$");
var buf: [@TypeOf(params.hash).max_encoded_length]u8 = undefined;
const hash_str = try params.hash.toB64(&buf);
- try out.writeAll(hash_str);
+ try w.writeAll(hash_str);
}
/// Custom codec that maps 6 bits into 8 like regular Base64, but uses its own alphabet,
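Editor's note: the serialize rewrite above relies on the fixed-buffer Writer reporting overflow as error.WriteFailed, which the caller re-maps to the crypt-format error.NoSpaceLeft. A minimal sketch of that pattern, illustrative only and not part of this commit (the helper name and prefix bytes are made up):

const std = @import("std");

fn writePrefix(buf: []u8) error{NoSpaceLeft}![]const u8 {
    var w: std.Io.Writer = .fixed(buf);
    // A fixed writer fails with error.WriteFailed once `buf` is full.
    w.writeAll("$7$") catch return error.NoSpaceLeft; // example prefix bytes
    return w.buffered();
}

test writePrefix {
    var buf: [8]u8 = undefined;
    try std.testing.expectEqualStrings("$7$", try writePrefix(&buf));
    var tiny: [2]u8 = undefined;
    try std.testing.expectError(error.NoSpaceLeft, writePrefix(&tiny));
}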
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index 1abc2b0edc..d32f15f57b 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -373,18 +373,6 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
for (&d.s, v) |*dv, vv| dv.* +%= vv;
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 78c3ff5527..84cd0c2b0e 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -80,18 +80,6 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime default_delim
self.st.pad();
self.st.squeeze(out[0..]);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -191,18 +179,6 @@ fn ShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
pub fn fillBlock(self: *Self) void {
self.st.fillBlock();
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -284,18 +260,6 @@ fn CShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
pub fn fillBlock(self: *Self) void {
self.shaker.fillBlock();
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -390,18 +354,6 @@ fn KMacLike(comptime security_level: u11, comptime default_delim: u8, comptime r
ctx.update(msg);
ctx.final(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -482,18 +434,6 @@ fn TupleHashLike(comptime security_level: u11, comptime default_delim: u8, compt
}
self.cshaker.squeeze(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index 4334a6912d..cf595327c5 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -238,18 +238,6 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
pub fn toInt(msg: []const u8, key: *const [key_length]u8) T {
return State.hash(msg, key);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/debug/Dwarf/expression.zig b/lib/std/debug/Dwarf/expression.zig
index c123cdb30b..1c849489ad 100644
--- a/lib/std/debug/Dwarf/expression.zig
+++ b/lib/std/debug/Dwarf/expression.zig
@@ -8,6 +8,8 @@ const OP = std.dwarf.OP;
const abi = std.debug.Dwarf.abi;
const mem = std.mem;
const assert = std.debug.assert;
+const testing = std.testing;
+const Writer = std.Io.Writer;
/// Expressions can be evaluated in different contexts, each requiring its own set of inputs.
/// Callers should specify all the fields relevant to their context. If a field is required
@@ -782,7 +784,7 @@ pub fn Builder(comptime options: Options) type {
return struct {
/// Zero-operand instructions
- pub fn writeOpcode(writer: anytype, comptime opcode: u8) !void {
+ pub fn writeOpcode(writer: *Writer, comptime opcode: u8) !void {
if (options.call_frame_context and !comptime isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
switch (opcode) {
OP.dup,
@@ -823,14 +825,14 @@ pub fn Builder(comptime options: Options) type {
}
// 2.5.1.1: Literal Encodings
- pub fn writeLiteral(writer: anytype, literal: u8) !void {
+ pub fn writeLiteral(writer: *Writer, literal: u8) !void {
switch (literal) {
0...31 => |n| try writer.writeByte(n + OP.lit0),
else => return error.InvalidLiteral,
}
}
- pub fn writeConst(writer: anytype, comptime T: type, value: T) !void {
+ pub fn writeConst(writer: *Writer, comptime T: type, value: T) !void {
if (@typeInfo(T) != .int) @compileError("Constants must be integers");
switch (T) {
@@ -852,7 +854,7 @@ pub fn Builder(comptime options: Options) type {
else => switch (@typeInfo(T).int.signedness) {
.unsigned => {
try writer.writeByte(OP.constu);
- try leb.writeUleb128(writer, value);
+ try writer.writeUleb128(value);
},
.signed => {
try writer.writeByte(OP.consts);
@@ -862,105 +864,105 @@ pub fn Builder(comptime options: Options) type {
}
}
- pub fn writeConstx(writer: anytype, debug_addr_offset: anytype) !void {
+ pub fn writeConstx(writer: *Writer, debug_addr_offset: anytype) !void {
try writer.writeByte(OP.constx);
- try leb.writeUleb128(writer, debug_addr_offset);
+ try writer.writeUleb128(debug_addr_offset);
}
- pub fn writeConstType(writer: anytype, die_offset: anytype, value_bytes: []const u8) !void {
+ pub fn writeConstType(writer: *Writer, die_offset: anytype, value_bytes: []const u8) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
if (value_bytes.len > 0xff) return error.InvalidTypeLength;
try writer.writeByte(OP.const_type);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
try writer.writeByte(@intCast(value_bytes.len));
try writer.writeAll(value_bytes);
}
- pub fn writeAddr(writer: anytype, value: addr_type) !void {
+ pub fn writeAddr(writer: *Writer, value: addr_type) !void {
try writer.writeByte(OP.addr);
try writer.writeInt(addr_type, value, options.endian);
}
- pub fn writeAddrx(writer: anytype, debug_addr_offset: anytype) !void {
+ pub fn writeAddrx(writer: *Writer, debug_addr_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.addrx);
- try leb.writeUleb128(writer, debug_addr_offset);
+ try writer.writeUleb128(debug_addr_offset);
}
// 2.5.1.2: Register Values
- pub fn writeFbreg(writer: anytype, offset: anytype) !void {
+ pub fn writeFbreg(writer: *Writer, offset: anytype) !void {
try writer.writeByte(OP.fbreg);
try leb.writeIleb128(writer, offset);
}
- pub fn writeBreg(writer: anytype, register: u8, offset: anytype) !void {
+ pub fn writeBreg(writer: *Writer, register: u8, offset: anytype) !void {
if (register > 31) return error.InvalidRegister;
try writer.writeByte(OP.breg0 + register);
try leb.writeIleb128(writer, offset);
}
- pub fn writeBregx(writer: anytype, register: anytype, offset: anytype) !void {
+ pub fn writeBregx(writer: *Writer, register: anytype, offset: anytype) !void {
try writer.writeByte(OP.bregx);
- try leb.writeUleb128(writer, register);
+ try writer.writeUleb128(register);
try leb.writeIleb128(writer, offset);
}
- pub fn writeRegvalType(writer: anytype, register: anytype, offset: anytype) !void {
+ pub fn writeRegvalType(writer: *Writer, register: anytype, offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.regval_type);
- try leb.writeUleb128(writer, register);
- try leb.writeUleb128(writer, offset);
+ try writer.writeUleb128(register);
+ try writer.writeUleb128(offset);
}
// 2.5.1.3: Stack Operations
- pub fn writePick(writer: anytype, index: u8) !void {
+ pub fn writePick(writer: *Writer, index: u8) !void {
try writer.writeByte(OP.pick);
try writer.writeByte(index);
}
- pub fn writeDerefSize(writer: anytype, size: u8) !void {
+ pub fn writeDerefSize(writer: *Writer, size: u8) !void {
try writer.writeByte(OP.deref_size);
try writer.writeByte(size);
}
- pub fn writeXDerefSize(writer: anytype, size: u8) !void {
+ pub fn writeXDerefSize(writer: *Writer, size: u8) !void {
try writer.writeByte(OP.xderef_size);
try writer.writeByte(size);
}
- pub fn writeDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
+ pub fn writeDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.deref_type);
try writer.writeByte(size);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
- pub fn writeXDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
+ pub fn writeXDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
try writer.writeByte(OP.xderef_type);
try writer.writeByte(size);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
// 2.5.1.4: Arithmetic and Logical Operations
- pub fn writePlusUconst(writer: anytype, uint_value: anytype) !void {
+ pub fn writePlusUconst(writer: *Writer, uint_value: anytype) !void {
try writer.writeByte(OP.plus_uconst);
- try leb.writeUleb128(writer, uint_value);
+ try writer.writeUleb128(uint_value);
}
// 2.5.1.5: Control Flow Operations
- pub fn writeSkip(writer: anytype, offset: i16) !void {
+ pub fn writeSkip(writer: *Writer, offset: i16) !void {
try writer.writeByte(OP.skip);
try writer.writeInt(i16, offset, options.endian);
}
- pub fn writeBra(writer: anytype, offset: i16) !void {
+ pub fn writeBra(writer: *Writer, offset: i16) !void {
try writer.writeByte(OP.bra);
try writer.writeInt(i16, offset, options.endian);
}
- pub fn writeCall(writer: anytype, comptime T: type, offset: T) !void {
+ pub fn writeCall(writer: *Writer, comptime T: type, offset: T) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
switch (T) {
u16 => try writer.writeByte(OP.call2),
@@ -971,45 +973,45 @@ pub fn Builder(comptime options: Options) type {
try writer.writeInt(T, offset, options.endian);
}
- pub fn writeCallRef(writer: anytype, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
+ pub fn writeCallRef(writer: *Writer, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.call_ref);
try writer.writeInt(if (is_64) u64 else u32, value, options.endian);
}
- pub fn writeConvert(writer: anytype, die_offset: anytype) !void {
+ pub fn writeConvert(writer: *Writer, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.convert);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
- pub fn writeReinterpret(writer: anytype, die_offset: anytype) !void {
+ pub fn writeReinterpret(writer: *Writer, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.reinterpret);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
// 2.5.1.7: Special Operations
- pub fn writeEntryValue(writer: anytype, expression: []const u8) !void {
+ pub fn writeEntryValue(writer: *Writer, expression: []const u8) !void {
try writer.writeByte(OP.entry_value);
- try leb.writeUleb128(writer, expression.len);
+ try writer.writeUleb128(expression.len);
try writer.writeAll(expression);
}
// 2.6: Location Descriptions
- pub fn writeReg(writer: anytype, register: u8) !void {
+ pub fn writeReg(writer: *Writer, register: u8) !void {
try writer.writeByte(OP.reg0 + register);
}
- pub fn writeRegx(writer: anytype, register: anytype) !void {
+ pub fn writeRegx(writer: *Writer, register: anytype) !void {
try writer.writeByte(OP.regx);
- try leb.writeUleb128(writer, register);
+ try writer.writeUleb128(register);
}
- pub fn writeImplicitValue(writer: anytype, value_bytes: []const u8) !void {
+ pub fn writeImplicitValue(writer: *Writer, value_bytes: []const u8) !void {
try writer.writeByte(OP.implicit_value);
- try leb.writeUleb128(writer, value_bytes.len);
+ try writer.writeUleb128(value_bytes.len);
try writer.writeAll(value_bytes);
}
};
@@ -1042,8 +1044,7 @@ fn isOpcodeRegisterLocation(opcode: u8) bool {
};
}
-const testing = std.testing;
-test "DWARF expressions" {
+test "basics" {
const allocator = std.testing.allocator;
const options = Options{};
@@ -1052,10 +1053,10 @@ test "DWARF expressions" {
const b = Builder(options);
- var program = std.array_list.Managed(u8).init(allocator);
+ var program: std.Io.Writer.Allocating = .init(allocator);
defer program.deinit();
- const writer = program.writer();
+ const writer = &program.writer;
// Literals
{
@@ -1064,7 +1065,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, @intCast(i));
}
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
for (0..32) |i| {
const expected = 31 - i;
@@ -1108,16 +1109,16 @@ test "DWARF expressions" {
var mock_compile_unit: std.debug.Dwarf.CompileUnit = undefined;
mock_compile_unit.addr_base = 1;
- var mock_debug_addr = std.array_list.Managed(u8).init(allocator);
+ var mock_debug_addr: std.Io.Writer.Allocating = .init(allocator);
defer mock_debug_addr.deinit();
- try mock_debug_addr.writer().writeInt(u16, 0, native_endian);
- try mock_debug_addr.writer().writeInt(usize, input[11], native_endian);
- try mock_debug_addr.writer().writeInt(usize, input[12], native_endian);
+ try mock_debug_addr.writer.writeInt(u16, 0, native_endian);
+ try mock_debug_addr.writer.writeInt(usize, input[11], native_endian);
+ try mock_debug_addr.writer.writeInt(usize, input[12], native_endian);
- const context = Context{
+ const context: Context = .{
.compile_unit = &mock_compile_unit,
- .debug_addr = mock_debug_addr.items,
+ .debug_addr = mock_debug_addr.written(),
};
try b.writeConstx(writer, @as(usize, 1));
@@ -1127,7 +1128,7 @@ test "DWARF expressions" {
const type_bytes: []const u8 = &.{ 1, 2, 3, 4 };
try b.writeConstType(writer, die_offset, type_bytes);
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
const const_type = stack_machine.stack.pop().?.const_type;
try testing.expectEqual(die_offset, const_type.type_offset);
@@ -1185,7 +1186,7 @@ test "DWARF expressions" {
try b.writeBregx(writer, abi.ipRegNum(native_arch).?, @as(usize, 300));
try b.writeRegvalType(writer, @as(u8, 0), @as(usize, 400));
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
const regval_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(@as(usize, 400), regval_type.type_offset);
@@ -1214,7 +1215,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u8, 1);
try b.writeOpcode(writer, OP.dup);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
@@ -1222,7 +1223,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u8, 1);
try b.writeOpcode(writer, OP.drop);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expect(stack_machine.stack.pop() == null);
stack_machine.reset();
@@ -1231,7 +1232,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writePick(writer, 2);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1240,7 +1241,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.over);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1248,7 +1249,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.swap);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
@@ -1258,7 +1259,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.rot);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
@@ -1269,7 +1270,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeOpcode(writer, OP.deref);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1277,14 +1278,14 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeOpcode(writer, OP.xderef);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeDerefSize(writer, 1);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1292,7 +1293,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeXDerefSize(writer, 1);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
const type_offset: usize = @truncate(0xaabbaabb_aabbaabb);
@@ -1301,7 +1302,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeDerefType(writer, 1, type_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const deref_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(type_offset, deref_type.type_offset);
try testing.expectEqual(@as(u8, 1), deref_type.type_size);
@@ -1312,7 +1313,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeXDerefType(writer, 1, type_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const xderef_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(type_offset, xderef_type.type_offset);
try testing.expectEqual(@as(u8, 1), xderef_type.type_size);
@@ -1323,7 +1324,7 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.push_object_address);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @intFromPtr(context.object_address.?)), stack_machine.stack.pop().?.generic);
// TODO: Test OP.form_tls_address
@@ -1333,7 +1334,7 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.call_frame_cfa);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(context.cfa.?, stack_machine.stack.pop().?.generic);
}
@@ -1345,7 +1346,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, i16, -4096);
try b.writeOpcode(writer, OP.abs);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4096), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1353,7 +1354,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff0f);
try b.writeConst(writer, u16, 0xf0ff);
try b.writeOpcode(writer, OP.@"and");
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xf00f), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1361,7 +1362,7 @@ test "DWARF expressions" {
try b.writeConst(writer, i16, -404);
try b.writeConst(writer, i16, 100);
try b.writeOpcode(writer, OP.div);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(isize, -404 / 100), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
stack_machine.reset();
@@ -1369,7 +1370,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 200);
try b.writeConst(writer, u16, 50);
try b.writeOpcode(writer, OP.minus);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 150), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1377,7 +1378,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 123);
try b.writeConst(writer, u16, 100);
try b.writeOpcode(writer, OP.mod);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 23), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1385,7 +1386,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff);
try b.writeConst(writer, u16, 0xee);
try b.writeOpcode(writer, OP.mul);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xed12), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1394,7 +1395,7 @@ test "DWARF expressions" {
try b.writeOpcode(writer, OP.neg);
try b.writeConst(writer, i16, -6);
try b.writeOpcode(writer, OP.neg);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(isize, -5), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
@@ -1402,7 +1403,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u16, 0xff0f);
try b.writeOpcode(writer, OP.not);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(~@as(usize, 0xff0f), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1410,7 +1411,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff0f);
try b.writeConst(writer, u16, 0xf0ff);
try b.writeOpcode(writer, OP.@"or");
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xffff), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1418,14 +1419,14 @@ test "DWARF expressions" {
try b.writeConst(writer, i16, 402);
try b.writeConst(writer, i16, 100);
try b.writeOpcode(writer, OP.plus);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 502), stack_machine.stack.pop().?.generic);
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeConst(writer, u16, 4096);
try b.writePlusUconst(writer, @as(usize, 8192));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4096 + 8192), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1433,7 +1434,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shl);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xfff << 1), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1441,7 +1442,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shr);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xfff >> 1), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1449,7 +1450,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shr);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @bitCast(@as(isize, 0xfff) >> 1)), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1457,7 +1458,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xf0ff);
try b.writeConst(writer, u16, 0xff0f);
try b.writeOpcode(writer, OP.xor);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0x0ff0), stack_machine.stack.pop().?.generic);
}
@@ -1486,7 +1487,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 1);
try b.writeConst(writer, u16, 0);
try b.writeOpcode(writer, e[0]);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, e[3]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[2]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[1]), stack_machine.stack.pop().?.generic);
@@ -1497,7 +1498,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 2);
try b.writeSkip(writer, 1);
try b.writeLiteral(writer, 3);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 2), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1509,7 +1510,7 @@ test "DWARF expressions" {
try b.writeBra(writer, 1);
try b.writeLiteral(writer, 4);
try b.writeLiteral(writer, 5);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
try testing.expect(stack_machine.stack.pop() == null);
@@ -1535,7 +1536,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeConvert(writer, @as(usize, 0));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);
// Reinterpret to generic type
@@ -1543,7 +1544,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, @as(usize, 0));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);
// Reinterpret to new type
@@ -1553,7 +1554,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, die_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const const_type = stack_machine.stack.pop().?.const_type;
try testing.expectEqual(die_offset, const_type.type_offset);
@@ -1561,7 +1562,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeLiteral(writer, 0);
try b.writeReinterpret(writer, die_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const regval_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(die_offset, regval_type.type_offset);
}
@@ -1573,20 +1574,20 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.nop);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expect(stack_machine.stack.pop() == null);
// Sub-expression
{
- var sub_program = std.array_list.Managed(u8).init(allocator);
+ var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
- const sub_writer = sub_program.writer();
+ const sub_writer = &sub_program.writer;
try b.writeLiteral(sub_writer, 3);
stack_machine.reset();
program.clearRetainingCapacity();
- try b.writeEntryValue(writer, sub_program.items);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ try b.writeEntryValue(writer, sub_program.written());
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 3), stack_machine.stack.pop().?.generic);
}
@@ -1605,15 +1606,15 @@ test "DWARF expressions" {
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
- var sub_program = std.array_list.Managed(u8).init(allocator);
+ var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
- const sub_writer = sub_program.writer();
+ const sub_writer = &sub_program.writer;
try b.writeReg(sub_writer, 0);
stack_machine.reset();
program.clearRetainingCapacity();
- try b.writeEntryValue(writer, sub_program.items);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ try b.writeEntryValue(writer, sub_program.written());
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xee), stack_machine.stack.pop().?.generic);
} else |err| {
switch (err) {
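Editor's note: the test changes above swap the managed ArrayList for std.Io.Writer.Allocating as the growable byte sink, and the Builder now takes *std.Io.Writer with leb writes folded into writer.writeUleb128. A minimal sketch, illustrative only and not part of this commit (the opcode value and expectations are made up for the example):

const std = @import("std");

test "Allocating writer sketch" {
    var program: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer program.deinit();
    const writer = &program.writer;
    try writer.writeByte(0x23); // e.g. OP.plus_uconst
    try writer.writeUleb128(@as(usize, 42)); // its ULEB128 operand, one byte here
    try std.testing.expectEqual(@as(usize, 2), program.written().len);
    program.clearRetainingCapacity(); // reuse the allocation for the next expression
    try std.testing.expectEqual(@as(usize, 0), program.written().len);
}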
diff --git a/lib/std/debug/Pdb.zig b/lib/std/debug/Pdb.zig
index 4e46ab7bf6..008aad6ab6 100644
--- a/lib/std/debug/Pdb.zig
+++ b/lib/std/debug/Pdb.zig
@@ -2,10 +2,11 @@ const std = @import("../std.zig");
const File = std.fs.File;
const Allocator = std.mem.Allocator;
const pdb = std.pdb;
+const assert = std.debug.assert;
const Pdb = @This();
-in_file: File,
+file_reader: *File.Reader,
msf: Msf,
allocator: Allocator,
string_table: ?*MsfStream,
@@ -35,39 +36,38 @@ pub const Module = struct {
}
};
-pub fn init(allocator: Allocator, path: []const u8) !Pdb {
- const file = try std.fs.cwd().openFile(path, .{});
- errdefer file.close();
-
+pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
return .{
- .in_file = file,
- .allocator = allocator,
+ .file_reader = file_reader,
+ .allocator = gpa,
.string_table = null,
.dbi = null,
- .msf = try Msf.init(allocator, file),
- .modules = &[_]Module{},
- .sect_contribs = &[_]pdb.SectionContribEntry{},
+ .msf = try Msf.init(gpa, file_reader),
+ .modules = &.{},
+ .sect_contribs = &.{},
.guid = undefined,
.age = undefined,
};
}
pub fn deinit(self: *Pdb) void {
- self.in_file.close();
- self.msf.deinit(self.allocator);
+ const gpa = self.allocator;
+ self.msf.deinit(gpa);
for (self.modules) |*module| {
- module.deinit(self.allocator);
+ module.deinit(gpa);
}
- self.allocator.free(self.modules);
- self.allocator.free(self.sect_contribs);
+ gpa.free(self.modules);
+ gpa.free(self.sect_contribs);
}
pub fn parseDbiStream(self: *Pdb) !void {
var stream = self.getStream(pdb.StreamType.dbi) orelse
return error.InvalidDebugInfo;
- const reader = stream.reader();
- const header = try reader.readStruct(std.pdb.DbiStreamHeader);
+ const gpa = self.allocator;
+ const reader = &stream.interface;
+
+ const header = try reader.takeStruct(std.pdb.DbiStreamHeader, .little);
if (header.version_header != 19990903) // V70, only value observed by LLVM team
return error.UnknownPDBVersion;
// if (header.Age != age)
@@ -76,22 +76,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
const mod_info_size = header.mod_info_size;
const section_contrib_size = header.section_contribution_size;
- var modules = std.array_list.Managed(Module).init(self.allocator);
+ var modules = std.array_list.Managed(Module).init(gpa);
errdefer modules.deinit();
// Module Info Substream
var mod_info_offset: usize = 0;
while (mod_info_offset != mod_info_size) {
- const mod_info = try reader.readStruct(pdb.ModInfo);
+ const mod_info = try reader.takeStruct(pdb.ModInfo, .little);
var this_record_len: usize = @sizeOf(pdb.ModInfo);
- const module_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
- errdefer self.allocator.free(module_name);
- this_record_len += module_name.len + 1;
+ var module_name: std.Io.Writer.Allocating = .init(gpa);
+ defer module_name.deinit();
+ this_record_len += try reader.streamDelimiterLimit(&module_name.writer, 0, .limited(1024));
+ assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ reader.toss(1);
+ this_record_len += 1;
- const obj_file_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
- errdefer self.allocator.free(obj_file_name);
- this_record_len += obj_file_name.len + 1;
+ var obj_file_name: std.Io.Writer.Allocating = .init(gpa);
+ defer obj_file_name.deinit();
+ this_record_len += try reader.streamDelimiterLimit(&obj_file_name.writer, 0, .limited(1024));
+ assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ reader.toss(1);
+ this_record_len += 1;
if (this_record_len % 4 != 0) {
const round_to_next_4 = (this_record_len | 0x3) + 1;
@@ -100,10 +106,10 @@ pub fn parseDbiStream(self: *Pdb) !void {
this_record_len += march_forward_bytes;
}
- try modules.append(Module{
+ try modules.append(.{
.mod_info = mod_info,
- .module_name = module_name,
- .obj_file_name = obj_file_name,
+ .module_name = try module_name.toOwnedSlice(),
+ .obj_file_name = try obj_file_name.toOwnedSlice(),
.populated = false,
.symbols = undefined,
@@ -117,21 +123,21 @@ pub fn parseDbiStream(self: *Pdb) !void {
}
// Section Contribution Substream
- var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(self.allocator);
+ var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(gpa);
errdefer sect_contribs.deinit();
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
- const version = reader.readEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
- error.InvalidValue => return error.InvalidDebugInfo,
- else => |e| return e,
+ const version = reader.takeEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
+ error.InvalidEnumTag, error.EndOfStream => return error.InvalidDebugInfo,
+ error.ReadFailed => return error.ReadFailed,
};
_ = version;
sect_cont_offset += @sizeOf(u32);
}
while (sect_cont_offset != section_contrib_size) {
const entry = try sect_contribs.addOne();
- entry.* = try reader.readStruct(pdb.SectionContribEntry);
+ entry.* = try reader.takeStruct(pdb.SectionContribEntry, .little);
sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
if (sect_cont_offset > section_contrib_size)
@@ -143,29 +149,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
}
pub fn parseInfoStream(self: *Pdb) !void {
- var stream = self.getStream(pdb.StreamType.pdb) orelse
- return error.InvalidDebugInfo;
- const reader = stream.reader();
+ var stream = self.getStream(pdb.StreamType.pdb) orelse return error.InvalidDebugInfo;
+ const reader = &stream.interface;
// Parse the InfoStreamHeader.
- const version = try reader.readInt(u32, .little);
- const signature = try reader.readInt(u32, .little);
+ const version = try reader.takeInt(u32, .little);
+ const signature = try reader.takeInt(u32, .little);
_ = signature;
- const age = try reader.readInt(u32, .little);
- const guid = try reader.readBytesNoEof(16);
+ const age = try reader.takeInt(u32, .little);
+ const guid = try reader.takeArray(16);
if (version != 20000404) // VC70, only value observed by LLVM team
return error.UnknownPDBVersion;
- self.guid = guid;
+ self.guid = guid.*;
self.age = age;
+ const gpa = self.allocator;
+
// Find the string table.
const string_table_index = str_tab_index: {
- const name_bytes_len = try reader.readInt(u32, .little);
- const name_bytes = try self.allocator.alloc(u8, name_bytes_len);
- defer self.allocator.free(name_bytes);
- try reader.readNoEof(name_bytes);
+ const name_bytes_len = try reader.takeInt(u32, .little);
+ const name_bytes = try reader.readAlloc(gpa, name_bytes_len);
const HashTableHeader = extern struct {
size: u32,
@@ -175,23 +180,23 @@ pub fn parseInfoStream(self: *Pdb) !void {
return cap * 2 / 3 + 1;
}
};
- const hash_tbl_hdr = try reader.readStruct(HashTableHeader);
+ const hash_tbl_hdr = try reader.takeStruct(HashTableHeader, .little);
if (hash_tbl_hdr.capacity == 0)
return error.InvalidDebugInfo;
if (hash_tbl_hdr.size > HashTableHeader.maxLoad(hash_tbl_hdr.capacity))
return error.InvalidDebugInfo;
- const present = try readSparseBitVector(&reader, self.allocator);
- defer self.allocator.free(present);
+ const present = try readSparseBitVector(reader, gpa);
+ defer gpa.free(present);
if (present.len != hash_tbl_hdr.size)
return error.InvalidDebugInfo;
- const deleted = try readSparseBitVector(&reader, self.allocator);
- defer self.allocator.free(deleted);
+ const deleted = try readSparseBitVector(reader, gpa);
+ defer gpa.free(deleted);
for (present) |_| {
- const name_offset = try reader.readInt(u32, .little);
- const name_index = try reader.readInt(u32, .little);
+ const name_offset = try reader.takeInt(u32, .little);
+ const name_index = try reader.takeInt(u32, .little);
if (name_offset > name_bytes.len)
return error.InvalidDebugInfo;
const name = std.mem.sliceTo(name_bytes[name_offset..], 0);
@@ -233,6 +238,7 @@ pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.SourceLocation {
std.debug.assert(module.populated);
const subsect_info = module.subsect_info;
+ const gpa = self.allocator;
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -287,7 +293,16 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
const chksum_hdr: *align(1) pdb.FileChecksumEntryHeader = @ptrCast(&module.subsect_info[subsect_index]);
const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
try self.string_table.?.seekTo(strtab_offset);
- const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024);
+ const source_file_name = s: {
+ const string_reader = &self.string_table.?.interface;
+ var source_file_name: std.Io.Writer.Allocating = .init(gpa);
+ defer source_file_name.deinit();
+ _ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
+ assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ string_reader.toss(1);
+ break :s try source_file_name.toOwnedSlice();
+ };
+ errdefer gpa.free(source_file_name);
const line_entry_idx = line_i - 1;
@@ -341,19 +356,16 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
const stream = self.getStreamById(mod.mod_info.module_sym_stream) orelse
return error.MissingDebugInfo;
- const reader = stream.reader();
+ const reader = &stream.interface;
- const signature = try reader.readInt(u32, .little);
+ const signature = try reader.takeInt(u32, .little);
if (signature != 4)
return error.InvalidDebugInfo;
- mod.symbols = try self.allocator.alloc(u8, mod.mod_info.sym_byte_size - 4);
- errdefer self.allocator.free(mod.symbols);
- try reader.readNoEof(mod.symbols);
+ const gpa = self.allocator;
- mod.subsect_info = try self.allocator.alloc(u8, mod.mod_info.c13_byte_size);
- errdefer self.allocator.free(mod.subsect_info);
- try reader.readNoEof(mod.subsect_info);
+ mod.symbols = try reader.readAlloc(gpa, mod.mod_info.sym_byte_size - 4);
+ mod.subsect_info = try reader.readAlloc(gpa, mod.mod_info.c13_byte_size);
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -379,8 +391,7 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
}
pub fn getStreamById(self: *Pdb, id: u32) ?*MsfStream {
- if (id >= self.msf.streams.len)
- return null;
+ if (id >= self.msf.streams.len) return null;
return &self.msf.streams[id];
}
@@ -394,17 +405,14 @@ const Msf = struct {
directory: MsfStream,
streams: []MsfStream,
- fn init(allocator: Allocator, file: File) !Msf {
- const in = file.deprecatedReader();
-
- const superblock = try in.readStruct(pdb.SuperBlock);
+ fn init(gpa: Allocator, file_reader: *File.Reader) !Msf {
+ const superblock = try file_reader.interface.takeStruct(pdb.SuperBlock, .little);
- // Sanity checks
if (!std.mem.eql(u8, &superblock.file_magic, pdb.SuperBlock.expect_magic))
return error.InvalidDebugInfo;
if (superblock.free_block_map_block != 1 and superblock.free_block_map_block != 2)
return error.InvalidDebugInfo;
- const file_len = try file.getEndPos();
+ const file_len = try file_reader.getSize();
if (superblock.num_blocks * superblock.block_size != file_len)
return error.InvalidDebugInfo;
switch (superblock.block_size) {
@@ -417,163 +425,182 @@ const Msf = struct {
if (dir_block_count > superblock.block_size / @sizeOf(u32))
return error.UnhandledBigDirectoryStream; // cf. BlockMapAddr comment.
- try file.seekTo(superblock.block_size * superblock.block_map_addr);
- const dir_blocks = try allocator.alloc(u32, dir_block_count);
+ try file_reader.seekTo(superblock.block_size * superblock.block_map_addr);
+ const dir_blocks = try gpa.alloc(u32, dir_block_count);
for (dir_blocks) |*b| {
- b.* = try in.readInt(u32, .little);
+ b.* = try file_reader.interface.takeInt(u32, .little);
}
- var directory = MsfStream.init(
- superblock.block_size,
- file,
- dir_blocks,
- );
+ var directory_buffer: [64]u8 = undefined;
+ var directory = MsfStream.init(superblock.block_size, file_reader, dir_blocks, &directory_buffer);
- const begin = directory.pos;
- const stream_count = try directory.reader().readInt(u32, .little);
- const stream_sizes = try allocator.alloc(u32, stream_count);
- defer allocator.free(stream_sizes);
+ const begin = directory.logicalPos();
+ const stream_count = try directory.interface.takeInt(u32, .little);
+ const stream_sizes = try gpa.alloc(u32, stream_count);
+ defer gpa.free(stream_sizes);
// Microsoft's implementation uses @as(u32, -1) for inexistent streams.
// These streams are not used, but still participate in the file
// and must be taken into account when resolving stream indices.
- const Nil = 0xFFFFFFFF;
+ const nil_size = 0xFFFFFFFF;
for (stream_sizes) |*s| {
- const size = try directory.reader().readInt(u32, .little);
- s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.block_size);
+ const size = try directory.interface.takeInt(u32, .little);
+ s.* = if (size == nil_size) 0 else blockCountFromSize(size, superblock.block_size);
}
- const streams = try allocator.alloc(MsfStream, stream_count);
+ const streams = try gpa.alloc(MsfStream, stream_count);
+ errdefer gpa.free(streams);
+
for (streams, 0..) |*stream, i| {
const size = stream_sizes[i];
if (size == 0) {
- stream.* = MsfStream{
- .blocks = &[_]u32{},
- };
+ stream.* = .empty;
} else {
- var blocks = try allocator.alloc(u32, size);
- var j: u32 = 0;
- while (j < size) : (j += 1) {
- const block_id = try directory.reader().readInt(u32, .little);
+ const blocks = try gpa.alloc(u32, size);
+ errdefer gpa.free(blocks);
+ for (blocks) |*block| {
+ const block_id = try directory.interface.takeInt(u32, .little);
const n = (block_id % superblock.block_size);
// 0 is for pdb.SuperBlock, 1 and 2 for FPMs.
if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.block_size > file_len)
return error.InvalidBlockIndex;
- blocks[j] = block_id;
+ block.* = block_id;
}
-
- stream.* = MsfStream.init(
- superblock.block_size,
- file,
- blocks,
- );
+ const buffer = try gpa.alloc(u8, 64);
+ errdefer gpa.free(buffer);
+ stream.* = .init(superblock.block_size, file_reader, blocks, buffer);
}
}
- const end = directory.pos;
+ const end = directory.logicalPos();
if (end - begin != superblock.num_directory_bytes)
return error.InvalidStreamDirectory;
- return Msf{
+ return .{
.directory = directory,
.streams = streams,
};
}
- fn deinit(self: *Msf, allocator: Allocator) void {
- allocator.free(self.directory.blocks);
+ fn deinit(self: *Msf, gpa: Allocator) void {
+ gpa.free(self.directory.blocks);
for (self.streams) |*stream| {
- allocator.free(stream.blocks);
+ gpa.free(stream.interface.buffer);
+ gpa.free(stream.blocks);
}
- allocator.free(self.streams);
+ gpa.free(self.streams);
}
};
const MsfStream = struct {
- in_file: File = undefined,
- pos: u64 = undefined,
- blocks: []u32 = undefined,
- block_size: u32 = undefined,
-
- pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).@"fn".return_type.?).error_union.error_set;
+ file_reader: *File.Reader,
+ next_read_pos: u64,
+ blocks: []u32,
+ block_size: u32,
+ interface: std.Io.Reader,
+ err: ?Error,
+
+ const Error = File.Reader.SeekError;
+
+ const empty: MsfStream = .{
+ .file_reader = undefined,
+ .next_read_pos = 0,
+ .blocks = &.{},
+ .block_size = undefined,
+ .interface = .ending_instance,
+ .err = null,
+ };
- fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
- const stream = MsfStream{
- .in_file = file,
- .pos = 0,
+ fn init(block_size: u32, file_reader: *File.Reader, blocks: []u32, buffer: []u8) MsfStream {
+ return .{
+ .file_reader = file_reader,
+ .next_read_pos = 0,
.blocks = blocks,
.block_size = block_size,
+ .interface = .{
+ .vtable = &.{ .stream = stream },
+ .buffer = buffer,
+ .seek = 0,
+ .end = 0,
+ },
+ .err = null,
};
-
- return stream;
}
- fn read(self: *MsfStream, buffer: []u8) !usize {
- var block_id = @as(usize, @intCast(self.pos / self.block_size));
- if (block_id >= self.blocks.len) return 0; // End of Stream
- var block = self.blocks[block_id];
- var offset = self.pos % self.block_size;
+ fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
+ const ms: *MsfStream = @alignCast(@fieldParentPtr("interface", r));
- try self.in_file.seekTo(block * self.block_size + offset);
- const in = self.in_file.deprecatedReader();
+ var block_id: usize = @intCast(ms.next_read_pos / ms.block_size);
+ if (block_id >= ms.blocks.len) return error.EndOfStream;
+ var block = ms.blocks[block_id];
+ var offset = ms.next_read_pos % ms.block_size;
- var size: usize = 0;
- var rem_buffer = buffer;
- while (size < buffer.len) {
- const size_to_read = @min(self.block_size - offset, rem_buffer.len);
- size += try in.read(rem_buffer[0..size_to_read]);
- rem_buffer = buffer[size..];
- offset += size_to_read;
+ ms.file_reader.seekTo(block * ms.block_size + offset) catch |err| {
+ ms.err = err;
+ return error.ReadFailed;
+ };
+
+ var remaining = @intFromEnum(limit);
+ while (remaining != 0) {
+ const stream_len: usize = @min(remaining, ms.block_size - offset);
+ const n = try ms.file_reader.interface.stream(w, .limited(stream_len));
+ remaining -= n;
+ offset += n;
// If we're at the end of a block, go to the next one.
- if (offset == self.block_size) {
+ if (offset == ms.block_size) {
offset = 0;
block_id += 1;
- if (block_id >= self.blocks.len) break; // End of Stream
- block = self.blocks[block_id];
- try self.in_file.seekTo(block * self.block_size);
+ if (block_id >= ms.blocks.len) break; // End of Stream
+ block = ms.blocks[block_id];
+ ms.file_reader.seekTo(block * ms.block_size) catch |err| {
+ ms.err = err;
+ return error.ReadFailed;
+ };
}
}
- self.pos += buffer.len;
- return buffer.len;
+ const total = @intFromEnum(limit) - remaining;
+ ms.next_read_pos += total;
+ return total;
}
- pub fn seekBy(self: *MsfStream, len: i64) !void {
- self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
- if (self.pos >= self.blocks.len * self.block_size)
- return error.EOF;
+ pub fn logicalPos(ms: *const MsfStream) u64 {
+ return ms.next_read_pos - ms.interface.bufferedLen();
}
- pub fn seekTo(self: *MsfStream, len: u64) !void {
- self.pos = len;
- if (self.pos >= self.blocks.len * self.block_size)
- return error.EOF;
+ pub fn seekBy(ms: *MsfStream, len: i64) !void {
+ ms.next_read_pos = @as(u64, @intCast(@as(i64, @intCast(ms.logicalPos())) + len));
+ if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+ ms.interface.tossBuffered();
}
- fn getSize(self: *const MsfStream) u64 {
- return self.blocks.len * self.block_size;
+ pub fn seekTo(ms: *MsfStream, len: u64) !void {
+ ms.next_read_pos = len;
+ if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+ ms.interface.tossBuffered();
}
- fn getFilePos(self: MsfStream) u64 {
- const block_id = self.pos / self.block_size;
- const block = self.blocks[block_id];
- const offset = self.pos % self.block_size;
-
- return block * self.block_size + offset;
+ fn getSize(ms: *const MsfStream) u64 {
+ return ms.blocks.len * ms.block_size;
}
- pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
- return .{ .context = self };
+ fn getFilePos(ms: *const MsfStream) u64 {
+ const pos = ms.logicalPos();
+ const block_id = pos / ms.block_size;
+ const block = ms.blocks[block_id];
+ const offset = pos % ms.block_size;
+
+ return block * ms.block_size + offset;
}
};
-fn readSparseBitVector(stream: anytype, allocator: Allocator) ![]u32 {
- const num_words = try stream.readInt(u32, .little);
+fn readSparseBitVector(reader: *std.Io.Reader, allocator: Allocator) ![]u32 {
+ const num_words = try reader.takeInt(u32, .little);
var list = std.array_list.Managed(u32).init(allocator);
errdefer list.deinit();
var word_i: u32 = 0;
while (word_i != num_words) : (word_i += 1) {
- const word = try stream.readInt(u32, .little);
+ const word = try reader.takeInt(u32, .little);
var bit_i: u5 = 0;
while (true) : (bit_i += 1) {
if (word & (@as(u32, 1) << bit_i) != 0) {
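Editor's note: the PDB parser above moves from GenericReader's read*/readStruct calls to the take* methods on std.Io.Reader, with MsfStream exposing that interface through a vtable `stream` callback. A minimal sketch of the take-based consumption against an in-memory buffer, illustrative only and not part of this commit (the byte values are made up):

const std = @import("std");

test "take-based Reader sketch" {
    const bytes = [_]u8{ 0x04, 0x00, 0x00, 0x00, 0xaa, 0xbb };
    var r: std.Io.Reader = .fixed(&bytes);
    const signature = try r.takeInt(u32, .little); // like the module stream signature check
    try std.testing.expectEqual(@as(u32, 4), signature);
    const first = try r.takeByte();
    try std.testing.expectEqual(@as(u8, 0xaa), first);
    // Unread data remains visible without being consumed.
    try std.testing.expectEqualSlices(u8, &[_]u8{0xbb}, r.buffered());
}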
diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig
index 693643c598..0a7a0f9a80 100644
--- a/lib/std/debug/SelfInfo.zig
+++ b/lib/std/debug/SelfInfo.zig
@@ -713,22 +713,26 @@ pub const Module = switch (native_os) {
},
.uefi, .windows => struct {
base_address: usize,
- pdb: ?Pdb = null,
- dwarf: ?Dwarf = null,
+ pdb: ?Pdb,
+ dwarf: ?Dwarf,
coff_image_base: u64,
/// Only used if pdb is non-null
coff_section_headers: []coff.SectionHeader,
- pub fn deinit(self: *@This(), allocator: Allocator) void {
+ pub fn deinit(self: *@This(), gpa: Allocator) void {
if (self.dwarf) |*dwarf| {
- dwarf.deinit(allocator);
+ dwarf.deinit(gpa);
}
if (self.pdb) |*p| {
+ gpa.free(p.file_reader.interface.buffer);
+ gpa.destroy(p.file_reader);
p.deinit();
- allocator.free(self.coff_section_headers);
+ gpa.free(self.coff_section_headers);
}
+
+ self.* = undefined;
}
fn getSymbolFromPdb(self: *@This(), relocated_address: usize) !?std.debug.Symbol {
@@ -970,23 +974,25 @@ fn readMachODebugInfo(allocator: Allocator, macho_file: File) !Module {
};
}
-fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
+fn readCoffDebugInfo(gpa: Allocator, coff_obj: *coff.Coff) !Module {
nosuspend {
var di: Module = .{
.base_address = undefined,
.coff_image_base = coff_obj.getImageBase(),
.coff_section_headers = undefined,
+ .pdb = null,
+ .dwarf = null,
};
if (coff_obj.getSectionByName(".debug_info")) |_| {
// This coff file has embedded DWARF debug info
var sections: Dwarf.SectionArray = Dwarf.null_section_array;
- errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);
+ errdefer for (sections) |section| if (section) |s| if (s.owned) gpa.free(s.data);
inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
sections[i] = if (coff_obj.getSectionByName("." ++ section.name)) |section_header| blk: {
break :blk .{
- .data = try coff_obj.getSectionDataAlloc(section_header, allocator),
+ .data = try coff_obj.getSectionDataAlloc(section_header, gpa),
.virtual_address = section_header.virtual_address,
.owned = true,
};
@@ -999,7 +1005,7 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
.is_macho = false,
};
- try Dwarf.open(&dwarf, allocator);
+ try Dwarf.open(&dwarf, gpa);
di.dwarf = dwarf;
}
@@ -1008,20 +1014,31 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
if (fs.path.isAbsolute(raw_path)) {
break :blk raw_path;
} else {
- const self_dir = try fs.selfExeDirPathAlloc(allocator);
- defer allocator.free(self_dir);
- break :blk try fs.path.join(allocator, &.{ self_dir, raw_path });
+ const self_dir = try fs.selfExeDirPathAlloc(gpa);
+ defer gpa.free(self_dir);
+ break :blk try fs.path.join(gpa, &.{ self_dir, raw_path });
}
};
- defer if (path.ptr != raw_path.ptr) allocator.free(path);
+ defer if (path.ptr != raw_path.ptr) gpa.free(path);
- di.pdb = Pdb.init(allocator, path) catch |err| switch (err) {
+ const pdb_file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
error.FileNotFound, error.IsDir => {
if (di.dwarf == null) return error.MissingDebugInfo;
return di;
},
- else => return err,
+ else => |e| return e,
};
+ errdefer pdb_file.close();
+
+ const pdb_file_reader_buffer = try gpa.alloc(u8, 4096);
+ errdefer gpa.free(pdb_file_reader_buffer);
+
+ const pdb_file_reader = try gpa.create(File.Reader);
+ errdefer gpa.destroy(pdb_file_reader);
+
+ pdb_file_reader.* = pdb_file.reader(pdb_file_reader_buffer);
+
+ di.pdb = try Pdb.init(gpa, pdb_file_reader);
try di.pdb.?.parseInfoStream();
try di.pdb.?.parseDbiStream();
@@ -1029,8 +1046,8 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
return error.InvalidDebugInfo;
// Only used by the pdb path
- di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(allocator);
- errdefer allocator.free(di.coff_section_headers);
+ di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(gpa);
+ errdefer gpa.free(di.coff_section_headers);
return di;
}
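Note: the PDB path now heap-allocates a File.Reader together with its buffer and hands both to Pdb; Module.deinit above releases them again. A condensed sketch of that ownership pairing, with gpa and file as placeholder names:

const std = @import("std");
const File = std.fs.File;
const Allocator = std.mem.Allocator;

// Sketch of the allocate/teardown pairing used for the PDB file reader.
fn createFileReader(gpa: Allocator, file: File) !*File.Reader {
    const buffer = try gpa.alloc(u8, 4096);
    errdefer gpa.free(buffer);
    const file_reader = try gpa.create(File.Reader);
    errdefer gpa.destroy(file_reader);
    file_reader.* = file.reader(buffer);
    return file_reader;
}

fn destroyFileReader(gpa: Allocator, file_reader: *File.Reader) void {
    // Mirrors Module.deinit: free the buffer through the interface, then
    // the heap-allocated reader itself.
    gpa.free(file_reader.interface.buffer);
    gpa.destroy(file_reader);
}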
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
index 8da2112f8b..99825a87d1 100644
--- a/lib/std/fs/File.zig
+++ b/lib/std/fs/File.zig
@@ -1097,14 +1097,6 @@ pub fn deprecatedReader(file: File) DeprecatedReader {
return .{ .context = file };
}
-/// Deprecated in favor of `Writer`.
-pub const DeprecatedWriter = io.GenericWriter(File, WriteError, write);
-
-/// Deprecated in favor of `Writer`.
-pub fn deprecatedWriter(file: File) DeprecatedWriter {
- return .{ .context = file };
-}
-
/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.
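Note: with DeprecatedWriter gone, call sites follow the same shape as the new reader API: File.writer takes a caller-owned buffer, writes go through the interface field, and buffered output needs an explicit flush. A sketch assuming print and flush on std.Io.Writer (neither is shown in this patch):

const std = @import("std");

// Sketch of the replacement for file.deprecatedWriter().
fn writeGreeting(file: std.fs.File) !void {
    var buffer: [256]u8 = undefined;
    var writer = file.writer(&buffer);
    try writer.interface.print("hello, {s}\n", .{"writer"});
    // Nothing reaches the file until the buffer is flushed.
    try writer.interface.flush();
}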
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 52388f38ea..e730cff89b 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -6,7 +6,7 @@
//! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
//! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
//!
-//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.GenericWriter`.
+//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.Io.Writer`.
//! The high-level `stringify` serializes a Zig or `Value` type into JSON.
const builtin = @import("builtin");
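Note: only the doc comment changes here, but the new target type is worth spelling out: writeStream now emits to a *std.Io.Writer. A rough sketch, assuming the long-standing beginObject/objectField/write/endObject methods and an options struct as the second argument (none of which appear in this patch):

const std = @import("std");

// Rough sketch, not taken from this patch: emit JSON tokens to any
// *std.Io.Writer, e.g. one obtained from a file or a fixed buffer.
fn emitStatus(w: *std.Io.Writer) !void {
    var stream = std.json.writeStream(w, .{});
    try stream.beginObject();
    try stream.objectField("ok");
    try stream.write(true);
    try stream.endObject();
}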
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index 0de97011fd..d3dd231e3d 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -33,28 +33,6 @@ pub fn readUleb128(comptime T: type, reader: anytype) !T {
return @as(T, @truncate(value));
}
-/// Write a single unsigned integer as unsigned LEB128 to the given writer.
-pub fn writeUleb128(writer: anytype, arg: anytype) !void {
- const Arg = @TypeOf(arg);
- const Int = switch (Arg) {
- comptime_int => std.math.IntFittingRange(arg, arg),
- else => Arg,
- };
- const Value = if (@typeInfo(Int).int.bits < 8) u8 else Int;
- var value: Value = arg;
-
- while (true) {
- const byte: u8 = @truncate(value & 0x7f);
- value >>= 7;
- if (value == 0) {
- try writer.writeByte(byte);
- break;
- } else {
- try writer.writeByte(byte | 0x80);
- }
- }
-}
-
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readIleb128(comptime T: type, reader: anytype) !T {
@@ -374,84 +352,3 @@ test "deserialize unsigned LEB128" {
// Decode sequence of ULEB128 values
try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}
-
-fn test_write_leb128(value: anytype) !void {
- const T = @TypeOf(value);
- const signedness = @typeInfo(T).int.signedness;
- const t_signed = signedness == .signed;
-
- const writeStream = if (t_signed) writeIleb128 else writeUleb128;
- const readStream = if (t_signed) readIleb128 else readUleb128;
-
- // decode to a larger bit size too, to ensure sign extension
- // is working as expected
- const larger_type_bits = ((@typeInfo(T).int.bits + 8) / 8) * 8;
- const B = std.meta.Int(signedness, larger_type_bits);
-
- const bytes_needed = bn: {
- if (@typeInfo(T).int.bits <= 7) break :bn @as(u16, 1);
-
- const unused_bits = if (value < 0) @clz(~value) else @clz(value);
- const used_bits: u16 = (@typeInfo(T).int.bits - unused_bits) + @intFromBool(t_signed);
- if (used_bits <= 7) break :bn @as(u16, 1);
- break :bn ((used_bits + 6) / 7);
- };
-
- const max_groups = if (@typeInfo(T).int.bits == 0) 1 else (@typeInfo(T).int.bits + 6) / 7;
-
- var buf: [max_groups]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&buf);
-
- // stream write
- try writeStream(fbs.writer(), value);
- const w1_pos = fbs.pos;
- try testing.expect(w1_pos == bytes_needed);
-
- // stream read
- fbs.pos = 0;
- const sr = try readStream(T, fbs.reader());
- try testing.expect(fbs.pos == w1_pos);
- try testing.expect(sr == value);
-
- // bigger type stream read
- fbs.pos = 0;
- const bsr = try readStream(B, fbs.reader());
- try testing.expect(fbs.pos == w1_pos);
- try testing.expect(bsr == value);
-}
-
-test "serialize unsigned LEB128" {
- if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
-
- const max_bits = 18;
-
- comptime var t = 0;
- inline while (t <= max_bits) : (t += 1) {
- const T = std.meta.Int(.unsigned, t);
- const min = std.math.minInt(T);
- const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(.unsigned, @typeInfo(T).int.bits + 1), min);
-
- while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
- }
-}
-
-test "serialize signed LEB128" {
- if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
-
- // explicitly test i0 because starting `t` at 0
- // will break the while loop
- try test_write_leb128(@as(i0, 0));
-
- const max_bits = 18;
-
- comptime var t = 1;
- inline while (t <= max_bits) : (t += 1) {
- const T = std.meta.Int(.signed, t);
- const min = std.math.minInt(T);
- const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(.signed, @typeInfo(T).int.bits + 1), min);
-
- while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
- }
-}
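Note: writeUleb128 and its round-trip tests are removed along with the writer-generic plumbing; the encoding itself is unchanged. As a self-contained sketch, here is the same loop written against the concrete *std.Io.Writer using only writeByte; the helper name and test are illustrative, not part of std:

const std = @import("std");

// Sketch of ULEB128 encoding through the concrete writer interface;
// mirrors the loop deleted above.
fn writeUleb128Sketch(w: *std.Io.Writer, arg: u64) !void {
    var value = arg;
    while (true) {
        const byte: u8 = @truncate(value & 0x7f);
        value >>= 7;
        if (value == 0) {
            try w.writeByte(byte);
            break;
        } else {
            try w.writeByte(byte | 0x80);
        }
    }
}

test writeUleb128Sketch {
    var buf: [10]u8 = undefined;
    var w: std.Io.Writer = .fixed(&buf);
    try writeUleb128Sketch(&w, 624485);
    try std.testing.expectEqualSlices(u8, "\xe5\x8e\x26", buf[0..3]);
}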
diff --git a/lib/std/macho.zig b/lib/std/macho.zig
index 75aa91e536..a1d23d8b18 100644
--- a/lib/std/macho.zig
+++ b/lib/std/macho.zig
@@ -1883,10 +1883,8 @@ pub const GenericBlob = extern struct {
pub const data_in_code_entry = extern struct {
/// From mach_header to start of data range.
offset: u32,
-
/// Number of bytes in data range.
length: u16,
-
/// A DICE_KIND value.
kind: u16,
};
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig
index 55a53518d9..6c18999d4b 100644
--- a/lib/std/posix/test.zig
+++ b/lib/std/posix/test.zig
@@ -683,11 +683,11 @@ test "mmap" {
const file = try tmp.dir.createFile(test_out_file, .{});
defer file.close();
- const stream = file.deprecatedWriter();
+ var stream = file.writer(&.{});
var i: u32 = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
- try stream.writeInt(u32, i, .little);
+ try stream.interface.writeInt(u32, i, .little);
}
}
diff --git a/lib/std/tz.zig b/lib/std/tz.zig
index bff0101439..32b23ddaef 100644
--- a/lib/std/tz.zig
+++ b/lib/std/tz.zig
@@ -1,6 +1,12 @@
-const std = @import("std.zig");
+//! The Time Zone Information Format (TZif)
+//! https://datatracker.ietf.org/doc/html/rfc8536
+
const builtin = @import("builtin");
+const std = @import("std.zig");
+const Reader = std.Io.Reader;
+const Allocator = std.mem.Allocator;
+
pub const Transition = struct {
ts: i64,
timetype: *Timetype,
@@ -34,7 +40,7 @@ pub const Leapsecond = struct {
};
pub const Tz = struct {
- allocator: std.mem.Allocator,
+ allocator: Allocator,
transitions: []const Transition,
timetypes: []const Timetype,
leapseconds: []const Leapsecond,
@@ -54,34 +60,30 @@ pub const Tz = struct {
},
};
- pub fn parse(allocator: std.mem.Allocator, reader: anytype) !Tz {
- var legacy_header = try reader.readStruct(Header);
+ pub fn parse(allocator: Allocator, reader: *Reader) !Tz {
+ const legacy_header = try reader.takeStruct(Header, .big);
if (!std.mem.eql(u8, &legacy_header.magic, "TZif")) return error.BadHeader;
- if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3') return error.BadVersion;
-
- if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
- std.mem.byteSwapAllFields(@TypeOf(legacy_header.counts), &legacy_header.counts);
- }
+ if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3')
+ return error.BadVersion;
- if (legacy_header.version == 0) {
+ if (legacy_header.version == 0)
return parseBlock(allocator, reader, legacy_header, true);
- } else {
- // If the format is modern, just skip over the legacy data
- const skipv = legacy_header.counts.timecnt * 5 + legacy_header.counts.typecnt * 6 + legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 + legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
- try reader.skipBytes(skipv, .{});
-
- var header = try reader.readStruct(Header);
- if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
- if (header.version != '2' and header.version != '3') return error.BadVersion;
- if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
- std.mem.byteSwapAllFields(@TypeOf(header.counts), &header.counts);
- }
- return parseBlock(allocator, reader, header, false);
- }
+ // If the format is modern, just skip over the legacy data
+ const skip_n = legacy_header.counts.timecnt * 5 +
+ legacy_header.counts.typecnt * 6 +
+ legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 +
+ legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
+ try reader.discardAll(skip_n);
+
+ var header = try reader.takeStruct(Header, .big);
+ if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
+ if (header.version != '2' and header.version != '3') return error.BadVersion;
+
+ return parseBlock(allocator, reader, header, false);
}
- fn parseBlock(allocator: std.mem.Allocator, reader: anytype, header: Header, legacy: bool) !Tz {
+ fn parseBlock(allocator: Allocator, reader: *Reader, header: Header, legacy: bool) !Tz {
if (header.counts.isstdcnt != 0 and header.counts.isstdcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isstdcnt [...] MUST either be zero or equal to "typecnt"
if (header.counts.isutcnt != 0 and header.counts.isutcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isutcnt [...] MUST either be zero or equal to "typecnt"
if (header.counts.typecnt == 0) return error.Malformed; // rfc8536: typecnt [...] MUST NOT be zero
@@ -98,12 +100,12 @@ pub const Tz = struct {
// Parse transition types
var i: usize = 0;
while (i < header.counts.timecnt) : (i += 1) {
- transitions[i].ts = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
+ transitions[i].ts = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
}
i = 0;
while (i < header.counts.timecnt) : (i += 1) {
- const tt = try reader.readByte();
+ const tt = try reader.takeByte();
if (tt >= timetypes.len) return error.Malformed; // rfc8536: Each type index MUST be in the range [0, "typecnt" - 1]
transitions[i].timetype = &timetypes[tt];
}
@@ -111,11 +113,11 @@ pub const Tz = struct {
// Parse time types
i = 0;
while (i < header.counts.typecnt) : (i += 1) {
- const offset = try reader.readInt(i32, .big);
+ const offset = try reader.takeInt(i32, .big);
if (offset < -2147483648) return error.Malformed; // rfc8536: utoff [...] MUST NOT be -2**31
- const dst = try reader.readByte();
+ const dst = try reader.takeByte();
if (dst != 0 and dst != 1) return error.Malformed; // rfc8536: (is)dst [...] The value MUST be 0 or 1.
- const idx = try reader.readByte();
+ const idx = try reader.takeByte();
if (idx > header.counts.charcnt - 1) return error.Malformed; // rfc8536: (desig)idx [...] Each index MUST be in the range [0, "charcnt" - 1]
timetypes[i] = .{
.offset = offset,
@@ -128,7 +130,7 @@ pub const Tz = struct {
}
var designators_data: [256 + 6]u8 = undefined;
- try reader.readNoEof(designators_data[0..header.counts.charcnt]);
+ try reader.readSliceAll(designators_data[0..header.counts.charcnt]);
const designators = designators_data[0..header.counts.charcnt];
if (designators[designators.len - 1] != 0) return error.Malformed; // rfc8536: charcnt [...] includes the trailing NUL (0x00) octet
@@ -144,12 +146,12 @@ pub const Tz = struct {
// Parse leap seconds
i = 0;
while (i < header.counts.leapcnt) : (i += 1) {
- const occur: i64 = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
+ const occur: i64 = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
if (occur < 0) return error.Malformed; // rfc8536: occur [...] MUST be nonnegative
if (i > 0 and leapseconds[i - 1].occurrence + 2419199 > occur) return error.Malformed; // rfc8536: occur [...] each later value MUST be at least 2419199 greater than the previous value
if (occur > std.math.maxInt(i48)) return error.Malformed; // Unreasonably far into the future
- const corr = try reader.readInt(i32, .big);
+ const corr = try reader.takeInt(i32, .big);
if (i == 0 and corr != -1 and corr != 1) return error.Malformed; // rfc8536: The correction value in the first leap-second record, if present, MUST be either one (1) or minus one (-1)
if (i > 0 and leapseconds[i - 1].correction != corr + 1 and leapseconds[i - 1].correction != corr - 1) return error.Malformed; // rfc8536: The correction values in adjacent leap-second records MUST differ by exactly one (1)
if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction
@@ -163,7 +165,7 @@ pub const Tz = struct {
// Parse standard/wall indicators
i = 0;
while (i < header.counts.isstdcnt) : (i += 1) {
- const stdtime = try reader.readByte();
+ const stdtime = try reader.takeByte();
if (stdtime == 1) {
timetypes[i].flags |= 0x02;
}
@@ -172,7 +174,7 @@ pub const Tz = struct {
// Parse UT/local indicators
i = 0;
while (i < header.counts.isutcnt) : (i += 1) {
- const ut = try reader.readByte();
+ const ut = try reader.takeByte();
if (ut == 1) {
timetypes[i].flags |= 0x04;
if (!timetypes[i].standardTimeIndicator()) return error.Malformed; // rfc8536: standard/wall value MUST be one (1) if the UT/local value is one (1)
@@ -182,9 +184,8 @@ pub const Tz = struct {
// Footer
var footer: ?[]u8 = null;
if (!legacy) {
- if ((try reader.readByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
- var footerdata_buf: [128]u8 = undefined;
- const footer_mem = reader.readUntilDelimiter(&footerdata_buf, '\n') catch |err| switch (err) {
+ if ((try reader.takeByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
+ const footer_mem = reader.takeSentinel('\n') catch |err| switch (err) {
error.StreamTooLong => return error.OverlargeFooter, // Read more than 128 bytes, much larger than any reasonable POSIX TZ string
else => return err,
};
@@ -194,7 +195,7 @@ pub const Tz = struct {
}
errdefer if (footer) |ft| allocator.free(ft);
- return Tz{
+ return .{
.allocator = allocator,
.transitions = transitions,
.timetypes = timetypes,
@@ -215,9 +216,9 @@ pub const Tz = struct {
test "slim" {
const data = @embedFile("tz/asia_tokyo.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 9);
@@ -228,9 +229,9 @@ test "slim" {
test "fat" {
const data = @embedFile("tz/antarctica_davis.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 8);
@@ -241,9 +242,9 @@ test "fat" {
test "legacy" {
// Taken from Slackware 8.0, from 2001
const data = @embedFile("tz/europe_vatican.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 170);
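Note: the tests above parse embedded data through a fixed reader; reading a TZif file from disk goes through File.Reader and its interface field instead. A sketch with placeholder path, buffer size, and allocator:

const std = @import("std");

// Sketch only: parse a TZif file from disk with the new reader API.
fn loadLocalTz(gpa: std.mem.Allocator) !std.Tz {
    const file = try std.fs.cwd().openFile("/etc/localtime", .{});
    defer file.close();
    var buffer: [4096]u8 = undefined;
    var file_reader = file.reader(&buffer);
    return std.Tz.parse(gpa, &file_reader.interface);
}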