aboutsummaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2025-08-29 03:48:45 -0700
committerGitHub <noreply@github.com>2025-08-29 03:48:45 -0700
commit4b948e8556b80cbc874415aa7c4bf9ac0027ffed (patch)
treeca48e7208aa23a24db82e8521c37a6c2abcd5dc1 /lib
parent640c11171bf8d13776629941f3305cf11c62c1f3 (diff)
parent43fbc37a490442ffcecf9817877f542251fee664 (diff)
downloadzig-4b948e8556b80cbc874415aa7c4bf9ac0027ffed.tar.gz
zig-4b948e8556b80cbc874415aa7c4bf9ac0027ffed.zip
Merge pull request #25036 from ziglang/GenericWriter
std.Io: delete GenericWriter, AnyWriter, and null_writer
Diffstat (limited to 'lib')
-rw-r--r--lib/compiler/aro/aro/Attribute.zig2
-rw-r--r--lib/compiler/aro/aro/Builtins/Builtin.zig5
-rw-r--r--lib/compiler/aro/aro/Compilation.zig64
-rw-r--r--lib/compiler/aro/aro/Parser.zig166
-rw-r--r--lib/compiler/aro/aro/Preprocessor.zig29
-rw-r--r--lib/compiler/aro/aro/Type.zig17
-rw-r--r--lib/compiler/aro/aro/Value.zig5
-rw-r--r--lib/compiler/aro_translate_c.zig18
-rw-r--r--lib/compiler/aro_translate_c/ast.zig2
-rw-r--r--lib/compiler/resinator/ani.zig26
-rw-r--r--lib/compiler/resinator/ast.zig80
-rw-r--r--lib/compiler/resinator/bmp.zig40
-rw-r--r--lib/compiler/resinator/cli.zig215
-rw-r--r--lib/compiler/resinator/compile.zig528
-rw-r--r--lib/compiler/resinator/cvtres.zig24
-rw-r--r--lib/compiler/resinator/errors.zig53
-rw-r--r--lib/compiler/resinator/ico.zig100
-rw-r--r--lib/compiler/resinator/lang.zig11
-rw-r--r--lib/compiler/resinator/literals.zig44
-rw-r--r--lib/compiler/resinator/main.zig130
-rw-r--r--lib/compiler/resinator/parse.zig52
-rw-r--r--lib/compiler/resinator/preprocess.zig50
-rw-r--r--lib/compiler/resinator/res.zig22
-rw-r--r--lib/compiler/resinator/source_mapping.zig10
-rw-r--r--lib/compiler/resinator/windows1252.zig45
-rw-r--r--lib/docs/wasm/Decl.zig9
-rw-r--r--lib/docs/wasm/html_render.zig18
-rw-r--r--lib/docs/wasm/main.zig54
-rw-r--r--lib/docs/wasm/markdown/renderer.zig31
-rw-r--r--lib/std/Build/Step/CheckObject.zig52
-rw-r--r--lib/std/Io.zig163
-rw-r--r--lib/std/Io/DeprecatedReader.zig98
-rw-r--r--lib/std/Io/DeprecatedWriter.zig114
-rw-r--r--lib/std/Io/Reader.zig53
-rw-r--r--lib/std/Io/Reader/test.zig351
-rw-r--r--lib/std/Io/Writer.zig34
-rw-r--r--lib/std/Io/fixed_buffer_stream.zig69
-rw-r--r--lib/std/Thread.zig2
-rw-r--r--lib/std/array_list.zig129
-rw-r--r--lib/std/base64.zig3
-rw-r--r--lib/std/crypto/aegis.zig12
-rw-r--r--lib/std/crypto/blake2.zig12
-rw-r--r--lib/std/crypto/blake3.zig12
-rw-r--r--lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig17
-rw-r--r--lib/std/crypto/ml_kem.zig92
-rw-r--r--lib/std/crypto/scrypt.zig21
-rw-r--r--lib/std/crypto/sha2.zig12
-rw-r--r--lib/std/crypto/sha3.zig60
-rw-r--r--lib/std/crypto/siphash.zig12
-rw-r--r--lib/std/debug/Dwarf/expression.zig201
-rw-r--r--lib/std/debug/Pdb.zig343
-rw-r--r--lib/std/debug/SelfInfo.zig51
-rw-r--r--lib/std/fs/File.zig8
-rw-r--r--lib/std/json.zig2
-rw-r--r--lib/std/leb128.zig103
-rw-r--r--lib/std/macho.zig2
-rw-r--r--lib/std/posix/test.zig4
-rw-r--r--lib/std/tz.zig89
58 files changed, 1412 insertions, 2559 deletions
diff --git a/lib/compiler/aro/aro/Attribute.zig b/lib/compiler/aro/aro/Attribute.zig
index 4db287b65c..d60bae46dc 100644
--- a/lib/compiler/aro/aro/Attribute.zig
+++ b/lib/compiler/aro/aro/Attribute.zig
@@ -780,7 +780,7 @@ fn ignoredAttrErr(p: *Parser, tok: TokenIndex, attr: Attribute.Tag, context: []c
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- try p.strings.writer().print("attribute '{s}' ignored on {s}", .{ @tagName(attr), context });
+ try p.strings.print("attribute '{s}' ignored on {s}", .{ @tagName(attr), context });
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
try p.errStr(.ignored_attribute, tok, str);
}
diff --git a/lib/compiler/aro/aro/Builtins/Builtin.zig b/lib/compiler/aro/aro/Builtins/Builtin.zig
index 6e5217b4da..a38eaf1d23 100644
--- a/lib/compiler/aro/aro/Builtins/Builtin.zig
+++ b/lib/compiler/aro/aro/Builtins/Builtin.zig
@@ -119,8 +119,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
var node_index: u16 = 0;
var count: u16 = index;
- var fbs = std.io.fixedBufferStream(buf);
- const w = fbs.writer();
+ var w: std.Io.Writer = .fixed(buf);
while (true) {
var sibling_index = dafsa[node_index].child_index;
@@ -142,7 +141,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
if (count == 0) break;
}
- return fbs.getWritten();
+ return w.buffered();
}
/// We're 1 bit shy of being able to fit this in a u32:
diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig
index d7377d7e52..1ba3af2628 100644
--- a/lib/compiler/aro/aro/Compilation.zig
+++ b/lib/compiler/aro/aro/Compilation.zig
@@ -16,6 +16,7 @@ const Pragma = @import("Pragma.zig");
const StrInt = @import("StringInterner.zig");
const record_layout = @import("record_layout.zig");
const target_util = @import("target.zig");
+const Writer = std.Io.Writer;
pub const Error = error{
/// A fatal error has ocurred and compilation has stopped.
@@ -199,7 +200,7 @@ fn getTimestamp(comp: *Compilation) !u47 {
return @intCast(std.math.clamp(timestamp, 0, max_timestamp));
}
-fn generateDateAndTime(w: anytype, timestamp: u47) !void {
+fn generateDateAndTime(w: *Writer, timestamp: u47) !void {
const epoch_seconds = EpochSeconds{ .secs = timestamp };
const epoch_day = epoch_seconds.getEpochDay();
const day_seconds = epoch_seconds.getDaySeconds();
@@ -242,7 +243,7 @@ pub const SystemDefinesMode = enum {
include_system_defines,
};
-fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
+fn generateSystemDefines(comp: *Compilation, w: *Writer) !void {
const ptr_width = comp.target.ptrBitWidth();
if (comp.langopts.gnuc_version > 0) {
@@ -533,11 +534,20 @@ fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) !Source {
try comp.generateBuiltinTypes();
- var buf = std.array_list.Managed(u8).init(comp.gpa);
- defer buf.deinit();
+ var allocating: std.Io.Writer.Allocating = .init(comp.gpa);
+ defer allocating.deinit();
+
+ generateBuiltinMacrosWriter(comp, system_defines_mode, &allocating.writer) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
+ };
+
+ return comp.addSourceFromBuffer("<builtin>", allocating.written());
+}
+pub fn generateBuiltinMacrosWriter(comp: *Compilation, system_defines_mode: SystemDefinesMode, buf: *Writer) !void {
if (system_defines_mode == .include_system_defines) {
- try buf.appendSlice(
+ try buf.writeAll(
\\#define __VERSION__ "Aro
++ " " ++ @import("../backend.zig").version_str ++ "\"\n" ++
\\#define __Aro__
@@ -545,11 +555,11 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
);
}
- try buf.appendSlice("#define __STDC__ 1\n");
- try buf.writer().print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});
+ try buf.writeAll("#define __STDC__ 1\n");
+ try buf.print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)});
// standard macros
- try buf.appendSlice(
+ try buf.writeAll(
\\#define __STDC_NO_COMPLEX__ 1
\\#define __STDC_NO_THREADS__ 1
\\#define __STDC_NO_VLA__ 1
@@ -561,23 +571,21 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
\\
);
if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| {
- try buf.appendSlice("#define __STDC_VERSION__ ");
- try buf.appendSlice(stdc_version);
- try buf.append('\n');
+ try buf.writeAll("#define __STDC_VERSION__ ");
+ try buf.writeAll(stdc_version);
+ try buf.writeByte('\n');
}
// timestamps
const timestamp = try comp.getTimestamp();
- try generateDateAndTime(buf.writer(), timestamp);
+ try generateDateAndTime(buf, timestamp);
if (system_defines_mode == .include_system_defines) {
- try comp.generateSystemDefines(buf.writer());
+ try comp.generateSystemDefines(buf);
}
-
- return comp.addSourceFromBuffer("<builtin>", buf.items);
}
-fn generateFloatMacros(w: anytype, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
+fn generateFloatMacros(w: *Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
const denormMin = semantics.chooseValue(
[]const u8,
.{
@@ -656,7 +664,7 @@ fn generateFloatMacros(w: anytype, prefix: []const u8, semantics: target_util.FP
try w.print("#define {s}MIN__ {s}{s}\n", .{ prefix_slice, min, ext });
}
-fn generateTypeMacro(w: anytype, mapper: StrInt.TypeMapper, name: []const u8, ty: Type, langopts: LangOpts) !void {
+fn generateTypeMacro(w: *Writer, mapper: StrInt.TypeMapper, name: []const u8, ty: Type, langopts: LangOpts) !void {
try w.print("#define {s} ", .{name});
try ty.print(mapper, langopts, w);
try w.writeByte('\n');
@@ -762,7 +770,7 @@ fn generateFastOrLeastType(
bits: usize,
kind: enum { least, fast },
signedness: std.builtin.Signedness,
- w: anytype,
+ w: *Writer,
mapper: StrInt.TypeMapper,
) !void {
const ty = comp.intLeastN(bits, signedness); // defining the fast types as the least types is permitted
@@ -793,7 +801,7 @@ fn generateFastOrLeastType(
try comp.generateFmt(prefix, w, ty);
}
-fn generateFastAndLeastWidthTypes(comp: *Compilation, w: anytype, mapper: StrInt.TypeMapper) !void {
+fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *Writer, mapper: StrInt.TypeMapper) !void {
const sizes = [_]usize{ 8, 16, 32, 64 };
for (sizes) |size| {
try comp.generateFastOrLeastType(size, .least, .signed, w, mapper);
@@ -803,7 +811,7 @@ fn generateFastAndLeastWidthTypes(comp: *Compilation, w: anytype, mapper: StrInt
}
}
-fn generateExactWidthTypes(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper) !void {
+fn generateExactWidthTypes(comp: *const Compilation, w: *Writer, mapper: StrInt.TypeMapper) !void {
try comp.generateExactWidthType(w, mapper, .schar);
if (comp.intSize(.short) > comp.intSize(.char)) {
@@ -851,7 +859,7 @@ fn generateExactWidthTypes(comp: *const Compilation, w: anytype, mapper: StrInt.
}
}
-fn generateFmt(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void {
+fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *Writer, ty: Type) !void {
const unsigned = ty.isUnsignedInt(comp);
const modifier = ty.formatModifier();
const formats = if (unsigned) "ouxX" else "di";
@@ -860,7 +868,7 @@ fn generateFmt(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Typ
}
}
-fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void {
+fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *Writer, ty: Type) !void {
return w.print("#define {s}_C_SUFFIX__ {s}\n", .{ prefix, ty.intValueSuffix(comp) });
}
@@ -868,7 +876,7 @@ fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: anytype,
/// Name macro (e.g. #define __UINT32_TYPE__ unsigned int)
/// Format strings (e.g. #define __UINT32_FMTu__ "u")
/// Suffix macro (e.g. #define __UINT32_C_SUFFIX__ U)
-fn generateExactWidthType(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper, specifier: Type.Specifier) !void {
+fn generateExactWidthType(comp: *const Compilation, w: *Writer, mapper: StrInt.TypeMapper, specifier: Type.Specifier) !void {
var ty = Type{ .specifier = specifier };
const width = 8 * ty.sizeof(comp).?;
const unsigned = ty.isUnsignedInt(comp);
@@ -998,7 +1006,7 @@ fn generateVaListType(comp: *Compilation) !Type {
return ty;
}
-fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntMax(comp: *const Compilation, w: *Writer, name: []const u8, ty: Type) !void {
const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
const unsigned = ty.isUnsignedInt(comp);
const max: u128 = switch (bit_count) {
@@ -1023,7 +1031,7 @@ pub fn wcharMax(comp: *const Compilation) u32 {
};
}
-fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void {
+fn generateExactWidthIntMax(comp: *const Compilation, w: *Writer, specifier: Type.Specifier) !void {
var ty = Type{ .specifier = specifier };
const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8);
const unsigned = ty.isUnsignedInt(comp);
@@ -1040,16 +1048,16 @@ fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Typ
return comp.generateIntMax(w, name, ty);
}
-fn generateIntWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntWidth(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
try w.print("#define __{s}_WIDTH__ {d}\n", .{ name, 8 * ty.sizeof(comp).? });
}
-fn generateIntMaxAndWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateIntMaxAndWidth(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
try comp.generateIntMax(w, name, ty);
try comp.generateIntWidth(w, name, ty);
}
-fn generateSizeofType(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void {
+fn generateSizeofType(comp: *Compilation, w: *Writer, name: []const u8, ty: Type) !void {
try w.print("#define {s} {d}\n", .{ name, ty.sizeof(comp).? });
}
diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig
index 14cd70fdfd..20a8ee3701 100644
--- a/lib/compiler/aro/aro/Parser.zig
+++ b/lib/compiler/aro/aro/Parser.zig
@@ -101,7 +101,7 @@ value_map: Tree.ValueMap,
// buffers used during compilation
syms: SymbolStack = .{},
-strings: std.array_list.AlignedManaged(u8, .@"4"),
+strings: std.array_list.Managed(u8),
labels: std.array_list.Managed(Label),
list_buf: NodeList,
decl_buf: NodeList,
@@ -447,7 +447,17 @@ pub fn typeStr(p: *Parser, ty: Type) ![]const u8 {
defer p.strings.items.len = strings_top;
const mapper = p.comp.string_interner.getSlowTypeMapper();
- try ty.print(mapper, p.comp.langopts, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ ty.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
}
@@ -455,7 +465,7 @@ pub fn typePairStr(p: *Parser, a: Type, b: Type) ![]const u8 {
return p.typePairStrExtra(a, " and ", b);
}
-pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const u8 {
+pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) Error![]const u8 {
if (@import("builtin").mode != .Debug) {
if (a.is(.invalid) or b.is(.invalid)) {
return "Tried to render invalid type - this is an aro bug.";
@@ -466,29 +476,60 @@ pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const
try p.strings.append('\'');
const mapper = p.comp.string_interner.getSlowTypeMapper();
- try a.print(mapper, p.comp.langopts, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ a.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
try p.strings.append('\'');
try p.strings.appendSlice(msg);
try p.strings.append('\'');
- try b.print(mapper, p.comp.langopts, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ b.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
try p.strings.append('\'');
return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
}
-pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 {
+pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) Error![]const u8 {
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- var w = p.strings.writer();
const type_pair_str = try p.typePairStrExtra(res.ty, " to ", int_ty);
- try w.writeAll(type_pair_str);
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ allocating.writer.writeAll(type_pair_str) catch return error.OutOfMemory;
- try w.writeAll(" changes ");
- if (res.val.isZero(p.comp)) try w.writeAll("non-zero ");
- try w.writeAll("value from ");
- try old_value.print(res.ty, p.comp, w);
- try w.writeAll(" to ");
- try res.val.print(int_ty, p.comp, w);
+ allocating.writer.writeAll(" changes ") catch return error.OutOfMemory;
+ if (res.val.isZero(p.comp)) allocating.writer.writeAll("non-zero ") catch return error.OutOfMemory;
+ allocating.writer.writeAll("value from ") catch return error.OutOfMemory;
+ old_value.print(res.ty, p.comp, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ allocating.writer.writeAll(" to ") catch return error.OutOfMemory;
+ res.val.print(int_ty, p.comp, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
}
@@ -498,9 +539,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- const w = p.strings.writer();
const msg_str = p.comp.interner.get(@"error".msg.ref()).bytes;
- try w.print("call to '{s}' declared with attribute error: {f}", .{
+ try p.strings.print("call to '{s}' declared with attribute error: {f}", .{
p.tokSlice(@"error".__name_tok), std.zig.fmtString(msg_str),
});
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
@@ -510,9 +550,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- const w = p.strings.writer();
const msg_str = p.comp.interner.get(warning.msg.ref()).bytes;
- try w.print("call to '{s}' declared with attribute warning: {f}", .{
+ try p.strings.print("call to '{s}' declared with attribute warning: {f}", .{
p.tokSlice(warning.__name_tok), std.zig.fmtString(msg_str),
});
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
@@ -532,17 +571,16 @@ fn errDeprecated(p: *Parser, tag: Diagnostics.Tag, tok_i: TokenIndex, msg: ?Valu
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- const w = p.strings.writer();
- try w.print("'{s}' is ", .{p.tokSlice(tok_i)});
+ try p.strings.print("'{s}' is ", .{p.tokSlice(tok_i)});
const reason: []const u8 = switch (tag) {
.unavailable => "unavailable",
.deprecated_declarations => "deprecated",
else => unreachable,
};
- try w.writeAll(reason);
+ try p.strings.appendSlice(reason);
if (msg) |m| {
const str = p.comp.interner.get(m.ref()).bytes;
- try w.print(": {f}", .{std.zig.fmtString(str)});
+ try p.strings.print(": {f}", .{std.zig.fmtString(str)});
}
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
return p.errStr(tag, tok_i, str);
@@ -681,7 +719,7 @@ fn diagnoseIncompleteDefinitions(p: *Parser) !void {
}
/// root : (decl | assembly ';' | staticAssert)*
-pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
+pub fn parse(pp: *Preprocessor) Error!Tree {
assert(pp.linemarkers == .none);
pp.comp.pragmaEvent(.before_parse);
@@ -693,7 +731,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
.gpa = pp.comp.gpa,
.arena = arena.allocator(),
.tok_ids = pp.tokens.items(.id),
- .strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
+ .strings = std.array_list.Managed(u8).init(pp.comp.gpa),
.value_map = Tree.ValueMap.init(pp.comp.gpa),
.data = NodeList.init(pp.comp.gpa),
.labels = std.array_list.Managed(Label).init(pp.comp.gpa),
@@ -1218,38 +1256,46 @@ fn decl(p: *Parser) Error!bool {
return true;
}
-fn staticAssertMessage(p: *Parser, cond_node: NodeIndex, message: Result) !?[]const u8 {
+fn staticAssertMessage(p: *Parser, cond_node: NodeIndex, message: Result) Error!?[]const u8 {
const cond_tag = p.nodes.items(.tag)[@intFromEnum(cond_node)];
if (cond_tag != .builtin_types_compatible_p and message.node == .none) return null;
- var buf = std.array_list.Managed(u8).init(p.gpa);
- defer buf.deinit();
+ var allocating: std.Io.Writer.Allocating = .init(p.gpa);
+ defer allocating.deinit();
+
+ const buf = &allocating.writer;
if (cond_tag == .builtin_types_compatible_p) {
const mapper = p.comp.string_interner.getSlowTypeMapper();
const data = p.nodes.items(.data)[@intFromEnum(cond_node)].bin;
- try buf.appendSlice("'__builtin_types_compatible_p(");
+ buf.writeAll("'__builtin_types_compatible_p(") catch return error.OutOfMemory;
const lhs_ty = p.nodes.items(.ty)[@intFromEnum(data.lhs)];
- try lhs_ty.print(mapper, p.comp.langopts, buf.writer());
- try buf.appendSlice(", ");
+ lhs_ty.print(mapper, p.comp.langopts, buf) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ buf.writeAll(", ") catch return error.OutOfMemory;
const rhs_ty = p.nodes.items(.ty)[@intFromEnum(data.rhs)];
- try rhs_ty.print(mapper, p.comp.langopts, buf.writer());
+ rhs_ty.print(mapper, p.comp.langopts, buf) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
- try buf.appendSlice(")'");
+ buf.writeAll(")'") catch return error.OutOfMemory;
}
if (message.node != .none) {
assert(p.nodes.items(.tag)[@intFromEnum(message.node)] == .string_literal_expr);
- if (buf.items.len > 0) {
- try buf.append(' ');
+ if (buf.buffered().len > 0) {
+ buf.writeByte(' ') catch return error.OutOfMemory;
}
const bytes = p.comp.interner.get(message.val.ref()).bytes;
- try buf.ensureUnusedCapacity(bytes.len);
- try Value.printString(bytes, message.ty, p.comp, buf.writer());
+ try allocating.ensureUnusedCapacity(bytes.len);
+ Value.printString(bytes, message.ty, p.comp, buf) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
}
- return try p.comp.diagnostics.arena.allocator().dupe(u8, buf.items);
+ return try p.comp.diagnostics.arena.allocator().dupe(u8, allocating.written());
}
/// staticAssert
@@ -4981,7 +5027,7 @@ const CallExpr = union(enum) {
return true;
}
- fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) !void {
+ fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) Error!void {
if (self == .standard) return;
const builtin_tok = p.nodes.items(.data)[@intFromEnum(self.builtin.node)].decl.name;
@@ -5183,7 +5229,17 @@ pub const Result = struct {
const strings_top = p.strings.items.len;
defer p.strings.items.len = strings_top;
- try res.val.print(res.ty, p.comp, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ res.val.print(res.ty, p.comp, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
}
@@ -5347,7 +5403,7 @@ pub const Result = struct {
conditional,
add,
sub,
- }) !bool {
+ }) Error!bool {
if (b.ty.specifier == .invalid) {
try a.saveValue(p);
a.ty = Type.invalid;
@@ -5643,7 +5699,7 @@ pub const Result = struct {
}
}
- fn floatToIntWarning(res: *Result, p: *Parser, int_ty: Type, old_value: Value, change_kind: Value.FloatToIntChangeKind, tok: TokenIndex) !void {
+ fn floatToIntWarning(res: *Result, p: *Parser, int_ty: Type, old_value: Value, change_kind: Value.FloatToIntChangeKind, tok: TokenIndex) Error!void {
switch (change_kind) {
.none => return p.errStr(.float_to_int, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)),
.out_of_range => return p.errStr(.float_out_of_range, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)),
@@ -5866,7 +5922,7 @@ pub const Result = struct {
res.val = .{};
}
- fn castType(res: *Result, p: *Parser, to: Type, operand_tok: TokenIndex, l_paren: TokenIndex) !void {
+ fn castType(res: *Result, p: *Parser, to: Type, operand_tok: TokenIndex, l_paren: TokenIndex) Error!void {
var cast_kind: Tree.CastKind = undefined;
if (to.is(.void)) {
@@ -7595,9 +7651,19 @@ fn validateFieldAccess(p: *Parser, record_ty: *const Type.Record, expr_ty: Type,
p.strings.items.len = 0;
- try p.strings.writer().print("'{s}' in '", .{p.tokSlice(field_name_tok)});
+ try p.strings.print("'{s}' in '", .{p.tokSlice(field_name_tok)});
const mapper = p.comp.string_interner.getSlowTypeMapper();
- try expr_ty.print(mapper, p.comp.langopts, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ expr_ty.print(mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
try p.strings.append('\'');
const duped = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items);
@@ -8016,7 +8082,17 @@ fn primaryExpr(p: *Parser) Error!Result {
defer p.strings.items.len = strings_top;
const mapper = p.comp.string_interner.getSlowTypeMapper();
- try Type.printNamed(func_ty, p.tokSlice(p.func.name), mapper, p.comp.langopts, p.strings.writer());
+ {
+ var unmanaged = p.strings.moveToUnmanaged();
+ var allocating: std.Io.Writer.Allocating = .fromArrayList(p.comp.gpa, &unmanaged);
+ defer {
+ unmanaged = allocating.toArrayList();
+ p.strings = unmanaged.toManaged(p.comp.gpa);
+ }
+ Type.printNamed(func_ty, p.tokSlice(p.func.name), mapper, p.comp.langopts, &allocating.writer) catch |e| switch (e) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ }
try p.strings.append(0);
const predef = try p.makePredefinedIdentifier(strings_top);
ty = predef.ty;
diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig
index 1ad666fecd..69f461f434 100644
--- a/lib/compiler/aro/aro/Preprocessor.zig
+++ b/lib/compiler/aro/aro/Preprocessor.zig
@@ -15,6 +15,7 @@ const TokenWithExpansionLocs = Tree.TokenWithExpansionLocs;
const Attribute = @import("Attribute.zig");
const features = @import("features.zig");
const Hideset = @import("Hideset.zig");
+const Writer = std.Io.Writer;
const DefineMap = std.StringHashMapUnmanaged(Macro);
const RawTokenList = std.array_list.Managed(RawToken);
@@ -982,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
.tok_i = @intCast(token_state.tokens_len),
.arena = pp.arena.allocator(),
.in_macro = true,
- .strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
+ .strings = std.array_list.Managed(u8).init(pp.comp.gpa),
.data = undefined,
.value_map = undefined,
@@ -1193,24 +1194,21 @@ fn expandObjMacro(pp: *Preprocessor, simple_macro: *const Macro) Error!ExpandBuf
.macro_file => {
const start = pp.comp.generated_buf.items.len;
const source = pp.comp.getSource(pp.expansion_source_loc.id);
- const w = pp.comp.generated_buf.writer(pp.gpa);
- try w.print("\"{s}\"\n", .{source.path});
+ try pp.comp.generated_buf.print(pp.gpa, "\"{s}\"\n", .{source.path});
buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .string_literal, tok));
},
.macro_line => {
const start = pp.comp.generated_buf.items.len;
const source = pp.comp.getSource(pp.expansion_source_loc.id);
- const w = pp.comp.generated_buf.writer(pp.gpa);
- try w.print("{d}\n", .{source.physicalLine(pp.expansion_source_loc)});
+ try pp.comp.generated_buf.print(pp.gpa, "{d}\n", .{source.physicalLine(pp.expansion_source_loc)});
buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok));
},
.macro_counter => {
defer pp.counter += 1;
const start = pp.comp.generated_buf.items.len;
- const w = pp.comp.generated_buf.writer(pp.gpa);
- try w.print("{d}\n", .{pp.counter});
+ try pp.comp.generated_buf.print(pp.gpa, "{d}\n", .{pp.counter});
buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok));
},
@@ -1682,8 +1680,7 @@ fn expandFuncMacro(
break :blk false;
} else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc);
const start = pp.comp.generated_buf.items.len;
- const w = pp.comp.generated_buf.writer(pp.gpa);
- try w.print("{}\n", .{@intFromBool(result)});
+ try pp.comp.generated_buf.print(pp.gpa, "{}\n", .{@intFromBool(result)});
try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw)));
},
.macro_param_has_c_attribute => {
@@ -2988,18 +2985,16 @@ fn embed(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!void {
// TODO: We currently only support systems with CHAR_BIT == 8
// If the target's CHAR_BIT is not 8, we need to write out correctly-sized embed_bytes
// and correctly account for the target's endianness
- const writer = pp.comp.generated_buf.writer(pp.gpa);
-
{
const byte = embed_bytes[0];
const start = pp.comp.generated_buf.items.len;
- try writer.print("{d}", .{byte});
+ try pp.comp.generated_buf.print(pp.gpa, "{d}", .{byte});
pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start, .embed_byte, filename_tok));
}
for (embed_bytes[1..]) |byte| {
const start = pp.comp.generated_buf.items.len;
- try writer.print(",{d}", .{byte});
+ try pp.comp.generated_buf.print(pp.gpa, ",{d}", .{byte});
pp.addTokenAssumeCapacity(.{ .id = .comma, .loc = .{ .id = .generated, .byte_offset = @intCast(start) } });
pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start + 1, .embed_byte, filename_tok));
}
@@ -3241,7 +3236,7 @@ fn findIncludeSource(pp: *Preprocessor, tokenizer: *Tokenizer, first: RawToken,
fn printLinemarker(
pp: *Preprocessor,
- w: anytype,
+ w: *Writer,
line_no: u32,
source: Source,
start_resume: enum(u8) { start, @"resume", none },
@@ -3301,7 +3296,7 @@ pub const DumpMode = enum {
/// Pretty-print the macro define or undef at location `loc`.
/// We re-tokenize the directive because we are printing a macro that may have the same name as one in
/// `pp.defines` but a different definition (due to being #undef'ed and then redefined)
-fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void {
+fn prettyPrintMacro(pp: *Preprocessor, w: *Writer, loc: Source.Location, parts: enum { name_only, name_and_body }) !void {
const source = pp.comp.getSource(loc.id);
var tokenizer: Tokenizer = .{
.buf = source.buf,
@@ -3339,7 +3334,7 @@ fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts:
}
}
-fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void {
+fn prettyPrintMacrosOnly(pp: *Preprocessor, w: *Writer) !void {
var it = pp.defines.valueIterator();
while (it.next()) |macro| {
if (macro.is_builtin) continue;
@@ -3351,7 +3346,7 @@ fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void {
}
/// Pretty print tokens and try to preserve whitespace.
-pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void {
+pub fn prettyPrintTokens(pp: *Preprocessor, w: *Writer, macro_dump_mode: DumpMode) !void {
if (macro_dump_mode == .macros_only) {
return pp.prettyPrintMacrosOnly(w);
}
diff --git a/lib/compiler/aro/aro/Type.zig b/lib/compiler/aro/aro/Type.zig
index 6bec686a21..a422150377 100644
--- a/lib/compiler/aro/aro/Type.zig
+++ b/lib/compiler/aro/aro/Type.zig
@@ -9,6 +9,7 @@ const StringInterner = @import("StringInterner.zig");
const StringId = StringInterner.StringId;
const target_util = @import("target.zig");
const LangOpts = @import("LangOpts.zig");
+const Writer = std.Io.Writer;
pub const Qualifiers = packed struct {
@"const": bool = false,
@@ -23,7 +24,7 @@ pub const Qualifiers = packed struct {
return quals.@"const" or quals.restrict or quals.@"volatile" or quals.atomic;
}
- pub fn dump(quals: Qualifiers, w: anytype) !void {
+ pub fn dump(quals: Qualifiers, w: *Writer) !void {
if (quals.@"const") try w.writeAll("const ");
if (quals.atomic) try w.writeAll("_Atomic ");
if (quals.@"volatile") try w.writeAll("volatile ");
@@ -2411,12 +2412,12 @@ pub fn intValueSuffix(ty: Type, comp: *const Compilation) []const u8 {
}
/// Print type in C style
-pub fn print(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn print(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
_ = try ty.printPrologue(mapper, langopts, w);
try ty.printEpilogue(mapper, langopts, w);
}
-pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
const simple = try ty.printPrologue(mapper, langopts, w);
if (simple) try w.writeByte(' ');
try w.writeAll(name);
@@ -2426,7 +2427,7 @@ pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper,
const StringGetter = fn (TokenIndex) []const u8;
/// return true if `ty` is simple
-fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!bool {
+fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!bool {
if (ty.qual.atomic) {
var non_atomic_ty = ty;
non_atomic_ty.qual.atomic = false;
@@ -2497,7 +2498,7 @@ fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts
return true;
}
-fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
if (ty.qual.atomic) return;
if (ty.isPtr()) {
const elem_ty = ty.elemType();
@@ -2564,7 +2565,7 @@ fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts
const dump_detailed_containers = false;
// Print as Zig types since those are actually readable
-pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
try ty.qual.dump(w);
switch (ty.specifier) {
.invalid => try w.writeAll("invalid"),
@@ -2656,7 +2657,7 @@ pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w:
}
}
-fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: anytype) @TypeOf(w).Error!void {
+fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: *Writer) Writer.Error!void {
try w.writeAll(" {");
for (@"enum".fields) |field| {
try w.print(" {s} = {d},", .{ mapper.lookup(field.name), field.value });
@@ -2664,7 +2665,7 @@ fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: anytype) @Type
try w.writeAll(" }");
}
-fn dumpRecord(record: *Record, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void {
+fn dumpRecord(record: *Record, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: *Writer) Writer.Error!void {
try w.writeAll(" {");
for (record.fields) |field| {
try w.writeByte(' ');
diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig
index 183c557976..adc3f08812 100644
--- a/lib/compiler/aro/aro/Value.zig
+++ b/lib/compiler/aro/aro/Value.zig
@@ -9,6 +9,7 @@ const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const target_util = @import("target.zig");
const annex_g = @import("annex_g.zig");
+const Writer = std.Io.Writer;
const Value = @This();
@@ -953,7 +954,7 @@ pub fn maxInt(ty: Type, comp: *Compilation) !Value {
return twosCompIntLimit(.max, ty, comp);
}
-pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
+pub fn print(v: Value, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
if (ty.is(.bool)) {
return w.writeAll(if (v.isZero(comp)) "false" else "true");
}
@@ -977,7 +978,7 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
}
}
-pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void {
+pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?);
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
try w.writeByte('"');
diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig
index cd8bd05c7c..d414e81437 100644
--- a/lib/compiler/aro_translate_c.zig
+++ b/lib/compiler/aro_translate_c.zig
@@ -116,15 +116,17 @@ pub fn translate(
var driver: aro.Driver = .{ .comp = comp };
defer driver.deinit();
- var macro_buf = std.array_list.Managed(u8).init(gpa);
+ var macro_buf: std.Io.Writer.Allocating = .init(gpa);
defer macro_buf.deinit();
- assert(!try driver.parseArgs(std.io.null_writer, macro_buf.writer(), args));
+ var trash: [64]u8 = undefined;
+ var discarding: std.Io.Writer.Discarding = .init(&trash);
+ assert(!try driver.parseArgs(&discarding.writer, &macro_buf.writer, args));
assert(driver.inputs.items.len == 1);
const source = driver.inputs.items[0];
const builtin_macros = try comp.generateBuiltinMacros(.include_system_defines);
- const user_macros = try comp.addSourceFromBuffer("<command line>", macro_buf.items);
+ const user_macros = try comp.addSourceFromBuffer("<command line>", macro_buf.written());
var pp = try aro.Preprocessor.initDefault(comp);
defer pp.deinit();
@@ -698,11 +700,10 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const Type.Enum, field_
}
fn getTypeStr(c: *Context, ty: Type) ![]const u8 {
- var buf: std.ArrayListUnmanaged(u8) = .empty;
- defer buf.deinit(c.gpa);
- const w = buf.writer(c.gpa);
- try ty.print(c.mapper, c.comp.langopts, w);
- return c.arena.dupe(u8, buf.items);
+ var allocating: std.Io.Writer.Allocating = .init(c.gpa);
+ defer allocating.deinit();
+ ty.print(c.mapper, c.comp.langopts, &allocating.writer) catch return error.OutOfMemory;
+ return c.arena.dupe(u8, allocating.written());
}
fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualHandling, source_loc: TokenIndex) TypeError!ZigNode {
@@ -1820,6 +1821,7 @@ pub fn main() !void {
var tree = translate(gpa, &aro_comp, args) catch |err| switch (err) {
error.ParsingFailed, error.FatalError => renderErrorsAndExit(&aro_comp),
error.OutOfMemory => return error.OutOfMemory,
+ error.WriteFailed => return error.WriteFailed,
error.StreamTooLong => std.process.fatal("An input file was larger than 4GiB", .{}),
};
defer tree.deinit(gpa);
diff --git a/lib/compiler/aro_translate_c/ast.zig b/lib/compiler/aro_translate_c/ast.zig
index b1786a5fd4..797f36e9f6 100644
--- a/lib/compiler/aro_translate_c/ast.zig
+++ b/lib/compiler/aro_translate_c/ast.zig
@@ -832,7 +832,7 @@ const Context = struct {
fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) Allocator.Error!TokenIndex {
const start_index = c.buf.items.len;
- try c.buf.writer().print(format ++ " ", args);
+ try c.buf.print(format ++ " ", args);
try c.tokens.append(c.gpa, .{
.tag = tag,
diff --git a/lib/compiler/resinator/ani.zig b/lib/compiler/resinator/ani.zig
index 770351351e..88064d2219 100644
--- a/lib/compiler/resinator/ani.zig
+++ b/lib/compiler/resinator/ani.zig
@@ -16,31 +16,31 @@ const std = @import("std");
const AF_ICON: u32 = 1;
-pub fn isAnimatedIcon(reader: anytype) bool {
+pub fn isAnimatedIcon(reader: *std.Io.Reader) bool {
const flags = getAniheaderFlags(reader) catch return false;
return flags & AF_ICON == AF_ICON;
}
-fn getAniheaderFlags(reader: anytype) !u32 {
- const riff_header = try reader.readBytesNoEof(4);
- if (!std.mem.eql(u8, &riff_header, "RIFF")) return error.InvalidFormat;
+fn getAniheaderFlags(reader: *std.Io.Reader) !u32 {
+ const riff_header = try reader.takeArray(4);
+ if (!std.mem.eql(u8, riff_header, "RIFF")) return error.InvalidFormat;
- _ = try reader.readInt(u32, .little); // size of RIFF chunk
+ _ = try reader.takeInt(u32, .little); // size of RIFF chunk
- const form_type = try reader.readBytesNoEof(4);
- if (!std.mem.eql(u8, &form_type, "ACON")) return error.InvalidFormat;
+ const form_type = try reader.takeArray(4);
+ if (!std.mem.eql(u8, form_type, "ACON")) return error.InvalidFormat;
while (true) {
- const chunk_id = try reader.readBytesNoEof(4);
- const chunk_len = try reader.readInt(u32, .little);
- if (!std.mem.eql(u8, &chunk_id, "anih")) {
+ const chunk_id = try reader.takeArray(4);
+ const chunk_len = try reader.takeInt(u32, .little);
+ if (!std.mem.eql(u8, chunk_id, "anih")) {
// TODO: Move file cursor instead of skipBytes
- try reader.skipBytes(chunk_len, .{});
+ try reader.discardAll(chunk_len);
continue;
}
- const aniheader = try reader.readStruct(ANIHEADER);
- return std.mem.nativeToLittle(u32, aniheader.flags);
+ const aniheader = try reader.takeStruct(ANIHEADER, .little);
+ return aniheader.flags;
}
}
diff --git a/lib/compiler/resinator/ast.zig b/lib/compiler/resinator/ast.zig
index 20eedb652d..07698bd65e 100644
--- a/lib/compiler/resinator/ast.zig
+++ b/lib/compiler/resinator/ast.zig
@@ -22,13 +22,13 @@ pub const Tree = struct {
return @alignCast(@fieldParentPtr("base", self.node));
}
- pub fn dump(self: *Tree, writer: anytype) @TypeOf(writer).Error!void {
+ pub fn dump(self: *Tree, writer: *std.io.Writer) !void {
try self.node.dump(self, writer, 0);
}
};
pub const CodePageLookup = struct {
- lookup: std.ArrayListUnmanaged(SupportedCodePage) = .empty,
+ lookup: std.ArrayList(SupportedCodePage) = .empty,
allocator: Allocator,
default_code_page: SupportedCodePage,
@@ -726,10 +726,10 @@ pub const Node = struct {
pub fn dump(
node: *const Node,
tree: *const Tree,
- writer: anytype,
+ writer: *std.io.Writer,
indent: usize,
- ) @TypeOf(writer).Error!void {
- try writer.writeByteNTimes(' ', indent);
+ ) std.io.Writer.Error!void {
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(@tagName(node.id));
switch (node.id) {
.root => {
@@ -768,11 +768,11 @@ pub const Node = struct {
.grouped_expression => {
const grouped: *const Node.GroupedExpression = @alignCast(@fieldParentPtr("base", node));
try writer.writeAll("\n");
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(grouped.open_token.slice(tree.source));
try writer.writeAll("\n");
try grouped.expression.dump(tree, writer, indent + 1);
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(grouped.close_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -790,13 +790,13 @@ pub const Node = struct {
for (accelerators.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(accelerators.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (accelerators.accelerators) |accelerator| {
try accelerator.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(accelerators.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -815,25 +815,25 @@ pub const Node = struct {
const dialog: *const Node.Dialog = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ dialog.id.slice(tree.source), dialog.type.slice(tree.source), dialog.common_resource_attributes.len });
inline for (.{ "x", "y", "width", "height" }) |arg| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try @field(dialog, arg).dump(tree, writer, indent + 2);
}
if (dialog.help_id) |help_id| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll("help_id:\n");
try help_id.dump(tree, writer, indent + 2);
}
for (dialog.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(dialog.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (dialog.controls) |control| {
try control.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(dialog.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -845,30 +845,30 @@ pub const Node = struct {
}
try writer.writeByte('\n');
if (control.class) |class| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll("class:\n");
try class.dump(tree, writer, indent + 2);
}
inline for (.{ "id", "x", "y", "width", "height" }) |arg| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try @field(control, arg).dump(tree, writer, indent + 2);
}
inline for (.{ "style", "exstyle", "help_id" }) |arg| {
if (@field(control, arg)) |val_node| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try val_node.dump(tree, writer, indent + 2);
}
}
if (control.extra_data_begin != null) {
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(control.extra_data_begin.?.slice(tree.source));
try writer.writeAll("\n");
for (control.extra_data) |data_node| {
try data_node.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(control.extra_data_end.?.slice(tree.source));
try writer.writeAll("\n");
}
@@ -877,17 +877,17 @@ pub const Node = struct {
const toolbar: *const Node.Toolbar = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} common_resource_attributes]\n", .{ toolbar.id.slice(tree.source), toolbar.type.slice(tree.source), toolbar.common_resource_attributes.len });
inline for (.{ "button_width", "button_height" }) |arg| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try @field(toolbar, arg).dump(tree, writer, indent + 2);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(toolbar.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (toolbar.buttons) |button_or_sep| {
try button_or_sep.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(toolbar.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -898,17 +898,17 @@ pub const Node = struct {
try statement.dump(tree, writer, indent + 1);
}
if (menu.help_id) |help_id| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll("help_id:\n");
try help_id.dump(tree, writer, indent + 2);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(menu.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (menu.items) |item| {
try item.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(menu.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -926,7 +926,7 @@ pub const Node = struct {
try writer.print(" {s} {s}\n", .{ menu_item.menuitem.slice(tree.source), menu_item.text.slice(tree.source) });
inline for (.{ "id", "type", "state" }) |arg| {
if (@field(menu_item, arg)) |val_node| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try val_node.dump(tree, writer, indent + 2);
}
@@ -935,13 +935,13 @@ pub const Node = struct {
.popup => {
const popup: *const Node.Popup = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} {s} [{d} options]\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source), popup.option_list.len });
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(popup.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (popup.items) |item| {
try item.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(popup.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -950,18 +950,18 @@ pub const Node = struct {
try writer.print(" {s} {s}\n", .{ popup.popup.slice(tree.source), popup.text.slice(tree.source) });
inline for (.{ "id", "type", "state", "help_id" }) |arg| {
if (@field(popup, arg)) |val_node| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try val_node.dump(tree, writer, indent + 2);
}
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(popup.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (popup.items) |item| {
try item.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(popup.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -971,13 +971,13 @@ pub const Node = struct {
for (version_info.fixed_info) |fixed_info| {
try fixed_info.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(version_info.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (version_info.block_statements) |block| {
try block.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(version_info.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -994,13 +994,13 @@ pub const Node = struct {
for (block.values) |value| {
try value.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(block.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (block.children) |child| {
try child.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(block.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -1025,13 +1025,13 @@ pub const Node = struct {
for (string_table.optional_statements) |statement| {
try statement.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(string_table.begin_token.slice(tree.source));
try writer.writeAll("\n");
for (string_table.strings) |string| {
try string.dump(tree, writer, indent + 1);
}
- try writer.writeByteNTimes(' ', indent);
+ try writer.splatByteAll(' ', indent);
try writer.writeAll(string_table.end_token.slice(tree.source));
try writer.writeAll("\n");
},
@@ -1039,7 +1039,7 @@ pub const Node = struct {
try writer.writeAll("\n");
const string: *const Node.StringTableString = @alignCast(@fieldParentPtr("base", node));
try string.id.dump(tree, writer, indent + 1);
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.print("{s}\n", .{string.string.slice(tree.source)});
},
.language_statement => {
@@ -1051,12 +1051,12 @@ pub const Node = struct {
.font_statement => {
const font: *const Node.FontStatement = @alignCast(@fieldParentPtr("base", node));
try writer.print(" {s} typeface: {s}\n", .{ font.identifier.slice(tree.source), font.typeface.slice(tree.source) });
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll("point_size:\n");
try font.point_size.dump(tree, writer, indent + 2);
inline for (.{ "weight", "italic", "char_set" }) |arg| {
if (@field(font, arg)) |arg_node| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.writeAll(arg ++ ":\n");
try arg_node.dump(tree, writer, indent + 2);
}
@@ -1071,7 +1071,7 @@ pub const Node = struct {
const invalid: *const Node.Invalid = @alignCast(@fieldParentPtr("base", node));
try writer.print(" context.len: {}\n", .{invalid.context.len});
for (invalid.context) |context_token| {
- try writer.writeByteNTimes(' ', indent + 1);
+ try writer.splatByteAll(' ', indent + 1);
try writer.print("{s}:{s}", .{ @tagName(context_token.id), context_token.slice(tree.source) });
try writer.writeByte('\n');
}
diff --git a/lib/compiler/resinator/bmp.zig b/lib/compiler/resinator/bmp.zig
index c9a0c29da0..651be2e450 100644
--- a/lib/compiler/resinator/bmp.zig
+++ b/lib/compiler/resinator/bmp.zig
@@ -27,6 +27,7 @@ pub const windows_format_id = std.mem.readInt(u16, "BM", native_endian);
pub const file_header_len = 14;
pub const ReadError = error{
+ ReadFailed,
UnexpectedEOF,
InvalidFileHeader,
ImpossiblePixelDataOffset,
@@ -94,9 +95,12 @@ pub const BitmapInfo = struct {
}
};
-pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
+pub fn read(reader: *std.Io.Reader, max_size: u64) ReadError!BitmapInfo {
var bitmap_info: BitmapInfo = undefined;
- const file_header = reader.readBytesNoEof(file_header_len) catch return error.UnexpectedEOF;
+ const file_header = reader.takeArray(file_header_len) catch |err| switch (err) {
+ error.EndOfStream => return error.UnexpectedEOF,
+ else => |e| return e,
+ };
const id = std.mem.readInt(u16, file_header[0..2], native_endian);
if (id != windows_format_id) return error.InvalidFileHeader;
@@ -104,14 +108,17 @@ pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
bitmap_info.pixel_data_offset = std.mem.readInt(u32, file_header[10..14], .little);
if (bitmap_info.pixel_data_offset > max_size) return error.ImpossiblePixelDataOffset;
- bitmap_info.dib_header_size = reader.readInt(u32, .little) catch return error.UnexpectedEOF;
+ bitmap_info.dib_header_size = reader.takeInt(u32, .little) catch return error.UnexpectedEOF;
if (bitmap_info.pixel_data_offset < file_header_len + bitmap_info.dib_header_size) return error.ImpossiblePixelDataOffset;
const dib_version = BitmapHeader.Version.get(bitmap_info.dib_header_size);
switch (dib_version) {
.@"nt3.1", .@"nt4.0", .@"nt5.0" => {
var dib_header_buf: [@sizeOf(BITMAPINFOHEADER)]u8 align(@alignOf(BITMAPINFOHEADER)) = undefined;
std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
- reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
+ reader.readSliceAll(dib_header_buf[4..]) catch |err| switch (err) {
+ error.EndOfStream => return error.UnexpectedEOF,
+ error.ReadFailed => |e| return e,
+ };
var dib_header: *BITMAPINFOHEADER = @ptrCast(&dib_header_buf);
structFieldsLittleToNative(BITMAPINFOHEADER, dib_header);
@@ -126,7 +133,10 @@ pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
.@"win2.0" => {
var dib_header_buf: [@sizeOf(BITMAPCOREHEADER)]u8 align(@alignOf(BITMAPCOREHEADER)) = undefined;
std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
- reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
+ reader.readSliceAll(dib_header_buf[4..]) catch |err| switch (err) {
+ error.EndOfStream => return error.UnexpectedEOF,
+ error.ReadFailed => |e| return e,
+ };
const dib_header: *BITMAPCOREHEADER = @ptrCast(&dib_header_buf);
structFieldsLittleToNative(BITMAPCOREHEADER, dib_header);
@@ -238,26 +248,26 @@ fn structFieldsLittleToNative(comptime T: type, x: *T) void {
test "read" {
var bmp_data = "BM<\x00\x00\x00\x00\x00\x00\x006\x00\x00\x00(\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x10\x00\x00\x00\x00\x00\x06\x00\x00\x00\x12\x0b\x00\x00\x12\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x7f\x00\x00\x00\x00".*;
- var fbs = std.io.fixedBufferStream(&bmp_data);
+ var fbs: std.Io.Reader = .fixed(&bmp_data);
{
- const bitmap = try read(fbs.reader(), bmp_data.len);
+ const bitmap = try read(&fbs, bmp_data.len);
try std.testing.expectEqual(@as(u32, BitmapHeader.Version.@"nt3.1".len()), bitmap.dib_header_size);
}
{
- fbs.reset();
+ fbs.seek = 0;
bmp_data[file_header_len] = 11;
- try std.testing.expectError(error.UnknownBitmapVersion, read(fbs.reader(), bmp_data.len));
+ try std.testing.expectError(error.UnknownBitmapVersion, read(&fbs, bmp_data.len));
// restore
bmp_data[file_header_len] = BitmapHeader.Version.@"nt3.1".len();
}
{
- fbs.reset();
+ fbs.seek = 0;
bmp_data[0] = 'b';
- try std.testing.expectError(error.InvalidFileHeader, read(fbs.reader(), bmp_data.len));
+ try std.testing.expectError(error.InvalidFileHeader, read(&fbs, bmp_data.len));
// restore
bmp_data[0] = 'B';
@@ -265,13 +275,13 @@ test "read" {
{
const cutoff_len = file_header_len + BitmapHeader.Version.@"nt3.1".len() - 1;
- var dib_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
- try std.testing.expectError(error.UnexpectedEOF, read(dib_cutoff_fbs.reader(), bmp_data.len));
+ var dib_cutoff_fbs: std.Io.Reader = .fixed(bmp_data[0..cutoff_len]);
+ try std.testing.expectError(error.UnexpectedEOF, read(&dib_cutoff_fbs, bmp_data.len));
}
{
const cutoff_len = file_header_len - 1;
- var bmp_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
- try std.testing.expectError(error.UnexpectedEOF, read(bmp_cutoff_fbs.reader(), bmp_data.len));
+ var bmp_cutoff_fbs: std.Io.Reader = .fixed(bmp_data[0..cutoff_len]);
+ try std.testing.expectError(error.UnexpectedEOF, read(&bmp_cutoff_fbs, bmp_data.len));
}
}
diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig
index 1527c60105..d39c126acf 100644
--- a/lib/compiler/resinator/cli.zig
+++ b/lib/compiler/resinator/cli.zig
@@ -80,20 +80,20 @@ pub const usage_string_after_command_name =
\\
;
-pub fn writeUsage(writer: anytype, command_name: []const u8) !void {
+pub fn writeUsage(writer: *std.Io.Writer, command_name: []const u8) !void {
try writer.writeAll("Usage: ");
try writer.writeAll(command_name);
try writer.writeAll(usage_string_after_command_name);
}
pub const Diagnostics = struct {
- errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
+ errors: std.ArrayList(ErrorDetails) = .empty,
allocator: Allocator,
pub const ErrorDetails = struct {
arg_index: usize,
arg_span: ArgSpan = .{},
- msg: std.ArrayListUnmanaged(u8) = .empty,
+ msg: std.ArrayList(u8) = .empty,
type: Type = .err,
print_args: bool = true,
@@ -148,7 +148,7 @@ pub const Options = struct {
allocator: Allocator,
input_source: IoSource = .{ .filename = &[_]u8{} },
output_source: IoSource = .{ .filename = &[_]u8{} },
- extra_include_paths: std.ArrayListUnmanaged([]const u8) = .empty,
+ extra_include_paths: std.ArrayList([]const u8) = .empty,
ignore_include_env_var: bool = false,
preprocess: Preprocess = .yes,
default_language_id: ?u16 = null,
@@ -295,7 +295,7 @@ pub const Options = struct {
}
}
- pub fn dumpVerbose(self: *const Options, writer: anytype) !void {
+ pub fn dumpVerbose(self: *const Options, writer: *std.Io.Writer) !void {
const input_source_name = switch (self.input_source) {
.stdio => "<stdin>",
.filename => |filename| filename,
@@ -520,8 +520,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// - or / on its own is an error
else => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid option: {s}", .{arg.prefixSlice()});
+ try err_details.msg.print(allocator, "invalid option: {s}", .{arg.prefixSlice()});
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
@@ -532,8 +531,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const args_remaining = args.len - arg_i;
if (args_remaining <= 2 and arg.looksLikeFilepath()) {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("this argument was inferred to be a filepath, so argument parsing was terminated");
+ try err_details.msg.appendSlice(allocator, "this argument was inferred to be a filepath, so argument parsing was terminated");
try diagnostics.append(err_details);
break;
@@ -550,16 +548,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":output-format")) {
const value = arg.value(":output-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":output-format".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":output-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
output_format = std.meta.stringToEnum(Options.OutputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid output format setting: {s} ", .{value.slice});
+ try err_details.msg.print(allocator, "invalid output format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk output_format;
};
@@ -569,16 +565,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":auto-includes")) {
const value = arg.value(":auto-includes".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":auto-includes".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":auto-includes".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
options.auto_includes = std.meta.stringToEnum(Options.AutoIncludes, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid auto includes setting: {s} ", .{value.slice});
+ try err_details.msg.print(allocator, "invalid auto includes setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk options.auto_includes;
};
@@ -587,16 +581,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":input-format")) {
const value = arg.value(":input-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":input-format".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":input-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
input_format = std.meta.stringToEnum(Options.InputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid input format setting: {s} ", .{value.slice});
+ try err_details.msg.print(allocator, "invalid input format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk input_format;
};
@@ -606,16 +598,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":depfile-fmt")) {
const value = arg.value(":depfile-fmt".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile-fmt".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile-fmt".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
options.depfile_fmt = std.meta.stringToEnum(Options.DepfileFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid depfile format setting: {s} ", .{value.slice});
+ try err_details.msg.print(allocator, "invalid depfile format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk options.depfile_fmt;
};
@@ -624,8 +614,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":depfile")) {
const value = arg.value(":depfile".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":depfile".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -643,8 +632,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":target")) {
const value = arg.value(":target".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":target".len) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":target".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -655,8 +643,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const arch_str = target_it.first();
const arch = cvtres.supported_targets.Arch.fromStringIgnoreCase(arch_str) orelse {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid or unsupported target architecture: {s}", .{arch_str});
+ try err_details.msg.print(allocator, "invalid or unsupported target architecture: {s}", .{arch_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -680,13 +667,11 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.prefix_len = arg.prefixSlice().len,
.value_offset = arg.name_offset + 3,
} };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value for {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
+ try err_details.msg.print(allocator, "missing value for {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try diagnostics.append(err_details);
}
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(3) });
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
@@ -695,16 +680,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
else if (std.ascii.startsWithIgnoreCase(arg_name, "tn")) {
const value = arg.value(2, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 2;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -716,16 +699,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(2, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 2;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -733,8 +714,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// Unsupported MUI options that do not need a value
else if (std.ascii.startsWithIgnoreCase(arg_name, "g1")) {
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(2) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg.name_offset += 2;
}
@@ -747,15 +727,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
std.ascii.startsWithIgnoreCase(arg_name, "ta"))
{
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(2) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg.name_offset += 2;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "fo")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing output path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "missing output path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -767,8 +745,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "sl")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -776,24 +753,20 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const percent_str = value.slice;
const percent: u32 = parsePercent(percent_str) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid percent format '{s}'", .{percent_str});
+ try err_details.msg.print(allocator, "invalid percent format '{s}'", .{percent_str});
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = false, .arg_index = arg_i };
- var note_writer = note_details.msg.writer(allocator);
- try note_writer.writeAll("string length percent must be an integer between 1 and 100 (inclusive)");
+ try note_details.msg.appendSlice(allocator, "string length percent must be an integer between 1 and 100 (inclusive)");
try diagnostics.append(note_details);
arg_i += value.index_increment;
continue :next_arg;
};
if (percent == 0 or percent > 100) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("percent out of range: {} (parsed from '{s}')", .{ percent, percent_str });
+ try err_details.msg.print(allocator, "percent out of range: {} (parsed from '{s}')", .{ percent, percent_str });
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = false, .arg_index = arg_i };
- var note_writer = note_details.msg.writer(allocator);
- try note_writer.writeAll("string length percent must be an integer between 1 and 100 (inclusive)");
+ try note_details.msg.appendSlice(allocator, "string length percent must be an integer between 1 and 100 (inclusive)");
try diagnostics.append(note_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -805,8 +778,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "ln")) {
const value = arg.value(2, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
+ try err_details.msg.print(allocator, "missing language tag after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(2) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -814,16 +786,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const tag = value.slice;
options.default_language_id = lang.tagToInt(tag) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid language tag: {s}", .{tag});
+ try err_details.msg.print(allocator, "invalid language tag: {s}", .{tag});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
};
if (options.default_language_id.? == lang.LOCALE_CUSTOM_UNSPECIFIED) {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("language tag '{s}' does not have an assigned ID so it will be resolved to LOCALE_CUSTOM_UNSPECIFIED (id=0x{x})", .{ tag, lang.LOCALE_CUSTOM_UNSPECIFIED });
+ try err_details.msg.print(allocator, "language tag '{s}' does not have an assigned ID so it will be resolved to LOCALE_CUSTOM_UNSPECIFIED (id=0x{x})", .{ tag, lang.LOCALE_CUSTOM_UNSPECIFIED });
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
@@ -831,8 +801,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "l")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing language ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing language ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -840,8 +809,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const num_str = value.slice;
options.default_language_id = lang.parseInt(num_str) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid language ID: {s}", .{num_str});
+ try err_details.msg.print(allocator, "invalid language ID: {s}", .{num_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -860,16 +828,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(1, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 1;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -882,16 +848,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
{
const value = arg.value(1, arg_i, args) catch no_value: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
// dummy zero-length slice starting where the value would have been
const value_start = arg.name_offset + 1;
break :no_value Arg.Value{ .slice = arg.full[value_start..value_start] };
};
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -899,15 +863,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// 1 char unsupported LCX/LCE options that do not need a value
else if (std.ascii.startsWithIgnoreCase(arg_name, "t")) {
var err_details = Diagnostics.ErrorDetails{ .type = .err, .arg_index = arg_i, .arg_span = arg.optionSpan(1) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "the {s}{s} option is unsupported", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "c")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing code page ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing code page ID after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -915,8 +877,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const num_str = value.slice;
const code_page_id = std.fmt.parseUnsigned(u16, num_str, 10) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid code page ID: {s}", .{num_str});
+ try err_details.msg.print(allocator, "invalid code page ID: {s}", .{num_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
@@ -924,16 +885,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
options.default_code_page = code_pages.getByIdentifierEnsureSupported(code_page_id) catch |err| switch (err) {
error.InvalidCodePage => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid or unknown code page ID: {}", .{code_page_id});
+ try err_details.msg.print(allocator, "invalid or unknown code page ID: {}", .{code_page_id});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
},
error.UnsupportedCodePage => {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("unsupported code page: {s} (id={})", .{
+ try err_details.msg.print(allocator, "unsupported code page: {s} (id={})", .{
@tagName(code_pages.getByIdentifier(code_page_id) catch unreachable),
code_page_id,
});
@@ -957,8 +916,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "i")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing include path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing include path after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -986,15 +944,13 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// Undocumented option with unknown function
// TODO: More investigation to figure out what it does (if anything)
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = arg.optionSpan(1) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("option {s}{s} has no effect (it is undocumented and its function is unknown in the Win32 RC compiler)", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "option {s}{s} has no effect (it is undocumented and its function is unknown in the Win32 RC compiler)", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "d")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing symbol to define after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing symbol to define after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -1009,8 +965,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
try options.define(symbol, symbol_value);
} else {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("symbol \"{s}\" is not a valid identifier and therefore cannot be defined", .{symbol});
+ try err_details.msg.print(allocator, "symbol \"{s}\" is not a valid identifier and therefore cannot be defined", .{symbol});
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
@@ -1018,8 +973,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
} else if (std.ascii.startsWithIgnoreCase(arg_name, "u")) {
const value = arg.value(1, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("missing symbol to undefine after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
+ try err_details.msg.print(allocator, "missing symbol to undefine after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(1) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
@@ -1029,16 +983,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
try options.undefine(symbol);
} else {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = arg_i, .arg_span = value.argSpan(arg) };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("symbol \"{s}\" is not a valid identifier and therefore cannot be undefined", .{symbol});
+ try err_details.msg.print(allocator, "symbol \"{s}\" is not a valid identifier and therefore cannot be undefined", .{symbol});
try diagnostics.append(err_details);
}
arg_i += value.index_increment;
continue :next_arg;
} else {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.optionAndAfterSpan() };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("invalid option: {s}{s}", .{ arg.prefixSlice(), arg.name() });
+ try err_details.msg.print(allocator, "invalid option: {s}{s}", .{ arg.prefixSlice(), arg.name() });
try diagnostics.append(err_details);
arg_i += 1;
continue :next_arg;
@@ -1055,16 +1007,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
if (positionals.len == 0) {
var err_details = Diagnostics.ErrorDetails{ .print_args = false, .arg_index = arg_i };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("missing input filename");
+ try err_details.msg.appendSlice(allocator, "missing input filename");
try diagnostics.append(err_details);
if (args.len > 0) {
const last_arg = args[args.len - 1];
if (arg_i > 0 and last_arg.len > 0 and last_arg[0] == '/' and isSupportedInputExtension(std.fs.path.extension(last_arg))) {
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i - 1 };
- var note_writer = note_details.msg.writer(allocator);
- try note_writer.writeAll("if this argument was intended to be the input filename, adding -- in front of it will exclude it from option parsing");
+ try note_details.msg.appendSlice(allocator, "if this argument was intended to be the input filename, adding -- in front of it will exclude it from option parsing");
try diagnostics.append(note_details);
}
}
@@ -1099,16 +1049,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
if (positionals.len > 1) {
if (output_filename != null) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i + 1 };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("output filename already specified");
+ try err_details.msg.appendSlice(allocator, "output filename already specified");
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{
.type = .note,
.arg_index = output_filename_context.arg.index,
.arg_span = output_filename_context.arg.value.argSpan(output_filename_context.arg.arg),
};
- var note_writer = note_details.msg.writer(allocator);
- try note_writer.writeAll("output filename previously specified here");
+ try note_details.msg.appendSlice(allocator, "output filename previously specified here");
try diagnostics.append(note_details);
} else {
output_filename = positionals[1];
@@ -1173,16 +1121,15 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
var print_output_format_source_note: bool = false;
if (options.depfile_path != null and (options.input_format == .res or options.output_format == .rcpp)) {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = depfile_context.index, .arg_span = depfile_context.value.argSpan(depfile_context.arg) };
- var msg_writer = err_details.msg.writer(allocator);
if (options.input_format == .res) {
- try msg_writer.print("the {s}{s} option was ignored because the input format is '{s}'", .{
+ try err_details.msg.print(allocator, "the {s}{s} option was ignored because the input format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.input_format),
});
print_input_format_source_note = true;
} else if (options.output_format == .rcpp) {
- try msg_writer.print("the {s}{s} option was ignored because the output format is '{s}'", .{
+ try err_details.msg.print(allocator, "the {s}{s} option was ignored because the output format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.output_format),
@@ -1193,16 +1140,14 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
if (!isSupportedTransformation(options.input_format, options.output_format)) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = input_filename_arg_i, .print_args = false };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("input format '{s}' cannot be converted to output format '{s}'", .{ @tagName(options.input_format), @tagName(options.output_format) });
+ try err_details.msg.print(allocator, "input format '{s}' cannot be converted to output format '{s}'", .{ @tagName(options.input_format), @tagName(options.output_format) });
try diagnostics.append(err_details);
print_input_format_source_note = true;
print_output_format_source_note = true;
}
if (options.preprocess == .only and options.output_format != .rcpp) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = preprocess_only_context.index };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the {s}{s} option cannot be used with output format '{s}'", .{
+ try err_details.msg.print(allocator, "the {s}{s} option cannot be used with output format '{s}'", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
@tagName(options.output_format),
@@ -1214,8 +1159,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
switch (input_format_source) {
.inferred_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("the input format was inferred from the input filename");
+ try err_details.msg.appendSlice(allocator, "the input format was inferred from the input filename");
try diagnostics.append(err_details);
},
.input_format_arg => {
@@ -1224,8 +1168,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg_index = input_format_context.index,
.arg_span = input_format_context.value.argSpan(input_format_context.arg),
};
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("the input format was specified here");
+ try err_details.msg.appendSlice(allocator, "the input format was specified here");
try diagnostics.append(err_details);
},
}
@@ -1234,11 +1177,10 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
switch (output_format_source) {
.inferred_from_input_filename, .unable_to_infer_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
- var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_input_filename) {
- try msg_writer.writeAll("the output format was inferred from the input filename");
+ try err_details.msg.appendSlice(allocator, "the output format was inferred from the input filename");
} else {
- try msg_writer.writeAll("the output format was unable to be inferred from the input filename, so the default was used");
+ try err_details.msg.appendSlice(allocator, "the output format was unable to be inferred from the input filename, so the default was used");
}
try diagnostics.append(err_details);
},
@@ -1248,11 +1190,10 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg => |ctx| .{ .type = .note, .arg_index = ctx.index, .arg_span = ctx.value.argSpan(ctx.arg) },
.unspecified => unreachable,
};
- var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_output_filename) {
- try msg_writer.writeAll("the output format was inferred from the output filename");
+ try err_details.msg.appendSlice(allocator, "the output format was inferred from the output filename");
} else {
- try msg_writer.writeAll("the output format was unable to be inferred from the output filename, so the default was used");
+ try err_details.msg.appendSlice(allocator, "the output format was unable to be inferred from the output filename, so the default was used");
}
try diagnostics.append(err_details);
},
@@ -1262,14 +1203,12 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
.arg_index = output_format_context.index,
.arg_span = output_format_context.value.argSpan(output_format_context.arg),
};
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.writeAll("the output format was specified here");
+ try err_details.msg.appendSlice(allocator, "the output format was specified here");
try diagnostics.append(err_details);
},
.inferred_from_preprocess_only => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = preprocess_only_context.index };
- var msg_writer = err_details.msg.writer(allocator);
- try msg_writer.print("the output format was inferred from the usage of the {s}{s} option", .{
+ try err_details.msg.print(allocator, "the output format was inferred from the usage of the {s}{s} option", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
});
@@ -1291,19 +1230,19 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
pub fn filepathWithExtension(allocator: Allocator, path: []const u8, ext: []const u8) ![]const u8 {
- var buf = std.array_list.Managed(u8).init(allocator);
- errdefer buf.deinit();
+ var buf: std.ArrayList(u8) = .empty;
+ errdefer buf.deinit(allocator);
if (std.fs.path.dirname(path)) |dirname| {
var end_pos = dirname.len;
// We want to ensure that we write a path separator at the end, so if the dirname
// doesn't end with a path sep then include the char after the dirname
// which must be a path sep.
if (!std.fs.path.isSep(dirname[dirname.len - 1])) end_pos += 1;
- try buf.appendSlice(path[0..end_pos]);
+ try buf.appendSlice(allocator, path[0..end_pos]);
}
- try buf.appendSlice(std.fs.path.stem(path));
- try buf.appendSlice(ext);
- return try buf.toOwnedSlice();
+ try buf.appendSlice(allocator, std.fs.path.stem(path));
+ try buf.appendSlice(allocator, ext);
+ return try buf.toOwnedSlice(allocator);
}
pub fn isSupportedInputExtension(ext: []const u8) bool {
@@ -1537,7 +1476,7 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
var options = parse(std.testing.allocator, args, &diagnostics) catch |err| switch (err) {
error.ParseError => {
try diagnostics.renderToWriter(args, &output.writer, .no_color);
- try std.testing.expectEqualStrings(expected_output, output.getWritten());
+ try std.testing.expectEqualStrings(expected_output, output.written());
return null;
},
else => |e| return e,
@@ -1545,7 +1484,7 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
errdefer options.deinit();
try diagnostics.renderToWriter(args, &output.writer, .no_color);
- try std.testing.expectEqualStrings(expected_output, output.getWritten());
+ try std.testing.expectEqualStrings(expected_output, output.written());
return options;
}
diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig
index 60d91eeb73..986740798d 100644
--- a/lib/compiler/resinator/compile.zig
+++ b/lib/compiler/resinator/compile.zig
@@ -35,10 +35,7 @@ pub const CompileOptions = struct {
diagnostics: *Diagnostics,
source_mappings: ?*SourceMappings = null,
/// List of paths (absolute or relative to `cwd`) for every file that the resources within the .rc file depend on.
- /// Items within the list will be allocated using the allocator of the ArrayList and must be
- /// freed by the caller.
- /// TODO: Maybe a dedicated struct for this purpose so that it's a bit nicer to work with.
- dependencies_list: ?*std.array_list.Managed([]const u8) = null,
+ dependencies: ?*Dependencies = null,
default_code_page: SupportedCodePage = .windows1252,
/// If true, the first #pragma code_page directive only sets the input code page, but not the output code page.
/// This check must be done before comments are removed from the file.
@@ -61,7 +58,26 @@ pub const CompileOptions = struct {
warn_instead_of_error_on_invalid_code_page: bool = false,
};
-pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, options: CompileOptions) !void {
+pub const Dependencies = struct {
+ list: std.ArrayList([]const u8),
+ allocator: Allocator,
+
+ pub fn init(allocator: Allocator) Dependencies {
+ return .{
+ .list = .empty,
+ .allocator = allocator,
+ };
+ }
+
+ pub fn deinit(self: *Dependencies) void {
+ for (self.list.items) |item| {
+ self.allocator.free(item);
+ }
+ self.list.deinit(self.allocator);
+ }
+};
+
+pub fn compile(allocator: Allocator, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void {
var lexer = lex.Lexer.init(source, .{
.default_code_page = options.default_code_page,
.source_mappings = options.source_mappings,
@@ -74,12 +90,12 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
var tree = try parser.parse(allocator, options.diagnostics);
defer tree.deinit();
- var search_dirs = std.array_list.Managed(SearchDir).init(allocator);
+ var search_dirs: std.ArrayList(SearchDir) = .empty;
defer {
for (search_dirs.items) |*search_dir| {
search_dir.deinit(allocator);
}
- search_dirs.deinit();
+ search_dirs.deinit(allocator);
}
if (options.source_mappings) |source_mappings| {
@@ -89,7 +105,7 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
if (std.fs.path.dirname(root_path)) |root_dir_path| {
var root_dir = try options.cwd.openDir(root_dir_path, .{});
errdefer root_dir.close();
- try search_dirs.append(.{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) });
+ try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) });
}
}
// Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed)
@@ -111,14 +127,14 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
});
return error.CompileError;
};
- try search_dirs.append(.{ .dir = cwd_dir, .path = null });
+ try search_dirs.append(allocator, .{ .dir = cwd_dir, .path = null });
for (options.extra_include_paths) |extra_include_path| {
var dir = openSearchPathDir(options.cwd, extra_include_path) catch {
// TODO: maybe a warning that the search path is skipped?
continue;
};
errdefer dir.close();
- try search_dirs.append(.{ .dir = dir, .path = try allocator.dupe(u8, extra_include_path) });
+ try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, extra_include_path) });
}
for (options.system_include_paths) |system_include_path| {
var dir = openSearchPathDir(options.cwd, system_include_path) catch {
@@ -126,7 +142,7 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
continue;
};
errdefer dir.close();
- try search_dirs.append(.{ .dir = dir, .path = try allocator.dupe(u8, system_include_path) });
+ try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, system_include_path) });
}
if (!options.ignore_include_env_var) {
const INCLUDE = std.process.getEnvVarOwned(allocator, "INCLUDE") catch "";
@@ -142,7 +158,7 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
while (it.next()) |search_path| {
var dir = openSearchPathDir(options.cwd, search_path) catch continue;
errdefer dir.close();
- try search_dirs.append(.{ .dir = dir, .path = try allocator.dupe(u8, search_path) });
+ try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, search_path) });
}
}
@@ -156,7 +172,7 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
.allocator = allocator,
.cwd = options.cwd,
.diagnostics = options.diagnostics,
- .dependencies_list = options.dependencies_list,
+ .dependencies = options.dependencies,
.input_code_pages = &tree.input_code_pages,
.output_code_pages = &tree.output_code_pages,
// This is only safe because we know search_dirs won't be modified past this point
@@ -178,7 +194,7 @@ pub const Compiler = struct {
cwd: std.fs.Dir,
state: State = .{},
diagnostics: *Diagnostics,
- dependencies_list: ?*std.array_list.Managed([]const u8),
+ dependencies: ?*Dependencies,
input_code_pages: *const CodePageLookup,
output_code_pages: *const CodePageLookup,
search_dirs: []SearchDir,
@@ -194,7 +210,7 @@ pub const Compiler = struct {
characteristics: u32 = 0,
};
- pub fn writeRoot(self: *Compiler, root: *Node.Root, writer: anytype) !void {
+ pub fn writeRoot(self: *Compiler, root: *Node.Root, writer: *std.Io.Writer) !void {
try writeEmptyResource(writer);
for (root.body) |node| {
try self.writeNode(node, writer);
@@ -236,7 +252,7 @@ pub const Compiler = struct {
}
}
- pub fn writeNode(self: *Compiler, node: *Node, writer: anytype) !void {
+ pub fn writeNode(self: *Compiler, node: *Node, writer: *std.Io.Writer) !void {
switch (node.id) {
.root => unreachable, // writeRoot should be called directly instead
.resource_external => try self.writeResourceExternal(@alignCast(@fieldParentPtr("base", node)), writer),
@@ -279,32 +295,32 @@ pub const Compiler = struct {
.literal, .number => {
const slice = literal_node.token.slice(self.source);
const code_page = self.input_code_pages.getForToken(literal_node.token);
- var buf = try std.array_list.Managed(u8).initCapacity(self.allocator, slice.len);
- errdefer buf.deinit();
+ var buf = try std.ArrayList(u8).initCapacity(self.allocator, slice.len);
+ errdefer buf.deinit(self.allocator);
var index: usize = 0;
while (code_page.codepointAt(index, slice)) |codepoint| : (index += codepoint.byte_len) {
const c = codepoint.value;
if (c == code_pages.Codepoint.invalid) {
- try buf.appendSlice("�");
+ try buf.appendSlice(self.allocator, "�");
} else {
// Anything that is not returned as an invalid codepoint must be encodable as UTF-8.
const utf8_len = std.unicode.utf8CodepointSequenceLength(c) catch unreachable;
- try buf.ensureUnusedCapacity(utf8_len);
+ try buf.ensureUnusedCapacity(self.allocator, utf8_len);
_ = std.unicode.utf8Encode(c, buf.unusedCapacitySlice()) catch unreachable;
buf.items.len += utf8_len;
}
}
- return buf.toOwnedSlice();
+ return buf.toOwnedSlice(self.allocator);
},
.quoted_ascii_string, .quoted_wide_string => {
const slice = literal_node.token.slice(self.source);
const column = literal_node.token.calculateColumn(self.source, 8, null);
const bytes = SourceBytes{ .slice = slice, .code_page = self.input_code_pages.getForToken(literal_node.token) };
- var buf = std.array_list.Managed(u8).init(self.allocator);
- errdefer buf.deinit();
+ var buf: std.ArrayList(u8) = .empty;
+ errdefer buf.deinit(self.allocator);
// Filenames are sort-of parsed as if they were wide strings, but the max escape width of
// hex/octal escapes is still determined by the L prefix. Since we want to end up with
@@ -320,19 +336,19 @@ pub const Compiler = struct {
while (try parser.nextUnchecked()) |parsed| {
const c = parsed.codepoint;
if (c == code_pages.Codepoint.invalid) {
- try buf.appendSlice("�");
+ try buf.appendSlice(self.allocator, "�");
} else {
var codepoint_buf: [4]u8 = undefined;
// If the codepoint cannot be encoded, we fall back to �
if (std.unicode.utf8Encode(c, &codepoint_buf)) |len| {
- try buf.appendSlice(codepoint_buf[0..len]);
+ try buf.appendSlice(self.allocator, codepoint_buf[0..len]);
} else |_| {
- try buf.appendSlice("�");
+ try buf.appendSlice(self.allocator, "�");
}
}
}
- return buf.toOwnedSlice();
+ return buf.toOwnedSlice(self.allocator);
},
else => unreachable, // no other token types should be in a filename literal node
}
@@ -386,10 +402,10 @@ pub const Compiler = struct {
const file = try utils.openFileNotDir(std.fs.cwd(), path, .{});
errdefer file.close();
- if (self.dependencies_list) |dependencies_list| {
- const duped_path = try dependencies_list.allocator.dupe(u8, path);
- errdefer dependencies_list.allocator.free(duped_path);
- try dependencies_list.append(duped_path);
+ if (self.dependencies) |dependencies| {
+ const duped_path = try dependencies.allocator.dupe(u8, path);
+ errdefer dependencies.allocator.free(duped_path);
+ try dependencies.list.append(dependencies.allocator, duped_path);
}
}
@@ -398,12 +414,12 @@ pub const Compiler = struct {
if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
errdefer file.close();
- if (self.dependencies_list) |dependencies_list| {
- const searched_file_path = try std.fs.path.join(dependencies_list.allocator, &.{
+ if (self.dependencies) |dependencies| {
+ const searched_file_path = try std.fs.path.join(dependencies.allocator, &.{
search_dir.path orelse "", path,
});
- errdefer dependencies_list.allocator.free(searched_file_path);
- try dependencies_list.append(searched_file_path);
+ errdefer dependencies.allocator.free(searched_file_path);
+ try dependencies.list.append(dependencies.allocator, searched_file_path);
}
return file;
@@ -421,8 +437,8 @@ pub const Compiler = struct {
const bytes = self.sourceBytesForToken(token);
const output_code_page = self.output_code_pages.getForToken(token);
- var buf = try std.array_list.Managed(u8).initCapacity(self.allocator, bytes.slice.len);
- errdefer buf.deinit();
+ var buf = try std.ArrayList(u8).initCapacity(self.allocator, bytes.slice.len);
+ errdefer buf.deinit(self.allocator);
var iterative_parser = literals.IterativeStringParser.init(bytes, .{
.start_column = token.calculateColumn(self.source, 8, null),
@@ -444,11 +460,11 @@ pub const Compiler = struct {
switch (iterative_parser.declared_string_type) {
.wide => {
if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
- try buf.append(best_fit);
+ try buf.append(self.allocator, best_fit);
} else if (c < 0x10000 or c == code_pages.Codepoint.invalid or parsed.escaped_surrogate_pair) {
- try buf.append('?');
+ try buf.append(self.allocator, '?');
} else {
- try buf.appendSlice("??");
+ try buf.appendSlice(self.allocator, "??");
}
},
.ascii => {
@@ -456,30 +472,30 @@ pub const Compiler = struct {
const truncated: u8 = @truncate(c);
switch (output_code_page) {
.utf8 => switch (truncated) {
- 0...0x7F => try buf.append(truncated),
- else => try buf.append('?'),
+ 0...0x7F => try buf.append(self.allocator, truncated),
+ else => try buf.append(self.allocator, '?'),
},
.windows1252 => {
- try buf.append(truncated);
+ try buf.append(self.allocator, truncated);
},
}
} else {
if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
- try buf.append(best_fit);
+ try buf.append(self.allocator, best_fit);
} else if (c < 0x10000 or c == code_pages.Codepoint.invalid) {
- try buf.append('?');
+ try buf.append(self.allocator, '?');
} else {
- try buf.appendSlice("??");
+ try buf.appendSlice(self.allocator, "??");
}
}
},
}
}
- return buf.toOwnedSlice();
+ return buf.toOwnedSlice(self.allocator);
}
- pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: anytype) !void {
+ pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: *std.Io.Writer) !void {
// Init header with data size zero for now, will need to fill it in later
var header = try self.resourceHeader(node.id, node.type, .{});
defer header.deinit(self.allocator);
@@ -572,7 +588,7 @@ pub const Compiler = struct {
switch (predefined_type) {
.GROUP_ICON, .GROUP_CURSOR => {
// Check for animated icon first
- if (ani.isAnimatedIcon(file_reader.interface.adaptToOldInterface())) {
+ if (ani.isAnimatedIcon(&file_reader.interface)) {
// Animated icons are just put into the resource unmodified,
// and the resource type changes to ANIICON/ANICURSOR
@@ -584,7 +600,12 @@ pub const Compiler = struct {
header.type_value.ordinal = @intFromEnum(new_predefined_type);
header.memory_flags = MemoryFlags.defaults(new_predefined_type);
header.applyMemoryFlags(node.common_resource_attributes, self.source);
- header.data_size = @intCast(try file_reader.getSize());
+ header.data_size = std.math.cast(u32, try file_reader.getSize()) orelse {
+ return self.addErrorDetailsAndFail(.{
+ .err = .resource_data_size_exceeds_max,
+ .token = node.id,
+ });
+ };
try header.write(writer, self.errContext(node.id));
try file_reader.seekTo(0);
@@ -595,7 +616,7 @@ pub const Compiler = struct {
// isAnimatedIcon moved the file cursor so reset to the start
try file_reader.seekTo(0);
- const icon_dir = ico.read(self.allocator, file_reader.interface.adaptToOldInterface(), try file_reader.getSize()) catch |err| switch (err) {
+ const icon_dir = ico.read(self.allocator, &file_reader.interface, try file_reader.getSize()) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => |e| {
return self.iconReadError(
@@ -861,7 +882,7 @@ pub const Compiler = struct {
header.applyMemoryFlags(node.common_resource_attributes, self.source);
const file_size = try file_reader.getSize();
- const bitmap_info = bmp.read(file_reader.interface.adaptToOldInterface(), file_size) catch |err| {
+ const bitmap_info = bmp.read(&file_reader.interface, file_size) catch |err| {
const filename_string_index = try self.diagnostics.putString(filename_utf8);
return self.addErrorDetailsAndFail(.{
.err = .bmp_read_error,
@@ -969,13 +990,19 @@ pub const Compiler = struct {
header.data_size = @intCast(file_size);
try header.write(writer, self.errContext(node.id));
- var header_slurping_reader = headerSlurpingReader(148, file_reader.interface.adaptToOldInterface());
- var adapter = header_slurping_reader.reader().adaptToNewApi(&.{});
- try writeResourceData(writer, &adapter.new_interface, header.data_size);
+ // Slurp the first 148 bytes separately so we can store them in the FontDir
+ var font_dir_header_buf: [148]u8 = @splat(0);
+ const populated_len: u32 = @intCast(try file_reader.interface.readSliceShort(&font_dir_header_buf));
+
+ // Write only the populated bytes slurped from the header
+ try writer.writeAll(font_dir_header_buf[0..populated_len]);
+ // Then write the rest of the bytes and the padding
+ try writeResourceDataNoPadding(writer, &file_reader.interface, header.data_size - populated_len);
+ try writeDataPadding(writer, header.data_size);
try self.state.font_dir.add(self.arena, FontDir.Font{
.id = header.name_value.ordinal,
- .header_bytes = header_slurping_reader.slurped_header,
+ .header_bytes = font_dir_header_buf,
}, node.id);
return;
},
@@ -1053,7 +1080,7 @@ pub const Compiler = struct {
}
}
- pub fn write(self: Data, writer: anytype) !void {
+ pub fn write(self: Data, writer: *std.Io.Writer) !void {
switch (self) {
.number => |number| switch (number.is_long) {
false => try writer.writeInt(WORD, number.asWord(), .little),
@@ -1225,38 +1252,30 @@ pub const Compiler = struct {
}
}
- pub fn writeResourceRawData(self: *Compiler, node: *Node.ResourceRawData, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeResourceRawData(self: *Compiler, node: *Node.ResourceRawData, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- // The header's data length field is a u32 so limit the resource's data size so that
- // we know we can always specify the real size.
- var limited_writer = limitedWriter(data_buffer.writer(), std.math.maxInt(u32));
- const data_writer = limited_writer.writer();
for (node.raw_data) |expression| {
const data = try self.evaluateDataExpression(expression);
defer data.deinit(self.allocator);
- data.write(data_writer) catch |err| switch (err) {
- error.NoSpaceLeft => {
- return self.addErrorDetailsAndFail(.{
- .err = .resource_data_size_exceeds_max,
- .token = node.id,
- });
- },
- else => |e| return e,
- };
+ try data.write(&data_buffer.writer);
}
- // This intCast can't fail because the limitedWriter above guarantees that
- // we will never write more than maxInt(u32) bytes.
- const data_len: u32 = @intCast(data_buffer.items.len);
+ // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
+ const data_len: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
+ return self.addErrorDetailsAndFail(.{
+ .err = .resource_data_size_exceeds_max,
+ .token = node.id,
+ });
+ };
try self.writeResourceHeader(writer, node.id, node.type, data_len, node.common_resource_attributes, self.state.language);
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_len);
}
- pub fn writeResourceHeader(self: *Compiler, writer: anytype, id_token: Token, type_token: Token, data_size: u32, common_resource_attributes: []Token, language: res.Language) !void {
+ pub fn writeResourceHeader(self: *Compiler, writer: *std.Io.Writer, id_token: Token, type_token: Token, data_size: u32, common_resource_attributes: []Token, language: res.Language) !void {
var header = try self.resourceHeader(id_token, type_token, .{
.language = language,
.data_size = data_size,
@@ -1272,7 +1291,7 @@ pub const Compiler = struct {
try data_reader.streamExact(writer, data_size);
}
- pub fn writeResourceData(writer: anytype, data_reader: *std.Io.Reader, data_size: u32) !void {
+ pub fn writeResourceData(writer: *std.Io.Writer, data_reader: *std.Io.Reader, data_size: u32) !void {
try writeResourceDataNoPadding(writer, data_reader, data_size);
try writeDataPadding(writer, data_size);
}
@@ -1305,28 +1324,19 @@ pub const Compiler = struct {
}
}
- pub fn writeAccelerators(self: *Compiler, node: *Node.Accelerators, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeAccelerators(self: *Compiler, node: *Node.Accelerators, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- // The header's data length field is a u32 so limit the resource's data size so that
- // we know we can always specify the real size.
- var limited_writer = limitedWriter(data_buffer.writer(), std.math.maxInt(u32));
- const data_writer = limited_writer.writer();
+ try self.writeAcceleratorsData(node, &data_buffer.writer);
- self.writeAcceleratorsData(node, data_writer) catch |err| switch (err) {
- error.NoSpaceLeft => {
- return self.addErrorDetailsAndFail(.{
- .err = .resource_data_size_exceeds_max,
- .token = node.id,
- });
- },
- else => |e| return e,
+ // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
+ const data_size: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
+ return self.addErrorDetailsAndFail(.{
+ .err = .resource_data_size_exceeds_max,
+ .token = node.id,
+ });
};
-
- // This intCast can't fail because the limitedWriter above guarantees that
- // we will never write more than maxInt(u32) bytes.
- const data_size: u32 = @intCast(data_buffer.items.len);
var header = try self.resourceHeader(node.id, node.type, .{
.data_size = data_size,
});
@@ -1337,13 +1347,13 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_size);
}
/// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
/// the writer within this function could return error.NoSpaceLeft
- pub fn writeAcceleratorsData(self: *Compiler, node: *Node.Accelerators, data_writer: anytype) !void {
+ pub fn writeAcceleratorsData(self: *Compiler, node: *Node.Accelerators, data_writer: *std.Io.Writer) !void {
for (node.accelerators, 0..) |accel_node, i| {
const accelerator: *Node.Accelerator = @alignCast(@fieldParentPtr("base", accel_node));
var modifiers = res.AcceleratorModifiers{};
@@ -1404,13 +1414,9 @@ pub const Compiler = struct {
caption: ?Token = null,
};
- pub fn writeDialog(self: *Compiler, node: *Node.Dialog, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeDialog(self: *Compiler, node: *Node.Dialog, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- // The header's data length field is a u32 so limit the resource's data size so that
- // we know we can always specify the real size.
- var limited_writer = limitedWriter(data_buffer.writer(), std.math.maxInt(u32));
- const data_writer = limited_writer.writer();
const resource = ResourceType.fromString(.{
.slice = node.type.slice(self.source),
@@ -1671,21 +1677,18 @@ pub const Compiler = struct {
optional_statement_values.style |= res.WS.CAPTION;
}
- self.writeDialogHeaderAndStrings(
+ // NOTE: Dialog header and menu/class/title strings can never exceed u32 bytes
+ // on their own.
+ try self.writeDialogHeaderAndStrings(
node,
- data_writer,
+ &data_buffer.writer,
resource,
&optional_statement_values,
x,
y,
width,
height,
- ) catch |err| switch (err) {
- // Dialog header and menu/class/title strings can never exceed u32 bytes
- // on their own, so this error is unreachable.
- error.NoSpaceLeft => unreachable,
- else => |e| return e,
- };
+ );
var controls_by_id = std.AutoHashMap(u32, *const Node.ControlStatement).init(self.allocator);
// Number of controls are guaranteed by the parser to be within maxInt(u16).
@@ -1695,31 +1698,30 @@ pub const Compiler = struct {
for (node.controls) |control_node| {
const control: *Node.ControlStatement = @alignCast(@fieldParentPtr("base", control_node));
- self.writeDialogControl(
+ try self.writeDialogControl(
control,
- data_writer,
+ &data_buffer.writer,
resource,
// We know the data_buffer len is limited to u32 max.
- @intCast(data_buffer.items.len),
+ @intCast(data_buffer.written().len),
&controls_by_id,
- ) catch |err| switch (err) {
- error.NoSpaceLeft => {
- try self.addErrorDetails(.{
- .err = .resource_data_size_exceeds_max,
- .token = node.id,
- });
- return self.addErrorDetailsAndFail(.{
- .err = .resource_data_size_exceeds_max,
- .type = .note,
- .token = control.type,
- });
- },
- else => |e| return e,
- };
+ );
+
+ if (data_buffer.written().len > std.math.maxInt(u32)) {
+ try self.addErrorDetails(.{
+ .err = .resource_data_size_exceeds_max,
+ .token = node.id,
+ });
+ return self.addErrorDetailsAndFail(.{
+ .err = .resource_data_size_exceeds_max,
+ .type = .note,
+ .token = control.type,
+ });
+ }
}
// We know the data_buffer len is limited to u32 max.
- const data_size: u32 = @intCast(data_buffer.items.len);
+ const data_size: u32 = @intCast(data_buffer.written().len);
var header = try self.resourceHeader(node.id, node.type, .{
.data_size = data_size,
});
@@ -1730,14 +1732,14 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_size);
}
fn writeDialogHeaderAndStrings(
self: *Compiler,
node: *Node.Dialog,
- data_writer: anytype,
+ data_writer: *std.Io.Writer,
resource: ResourceType,
optional_statement_values: *const DialogOptionalStatementValues,
x: Number,
@@ -1797,7 +1799,7 @@ pub const Compiler = struct {
fn writeDialogControl(
self: *Compiler,
control: *Node.ControlStatement,
- data_writer: anytype,
+ data_writer: *std.Io.Writer,
resource: ResourceType,
bytes_written_so_far: u32,
controls_by_id: *std.AutoHashMap(u32, *const Node.ControlStatement),
@@ -1821,7 +1823,7 @@ pub const Compiler = struct {
.token = control.type,
});
}
- try data_writer.writeByteNTimes(0, num_padding);
+ try data_writer.splatByteAll(0, num_padding);
const style = if (control.style) |style_expression|
// Certain styles are implied by the control type
@@ -1973,40 +1975,37 @@ pub const Compiler = struct {
try NameOrOrdinal.writeEmpty(data_writer);
}
- var extra_data_buf = std.array_list.Managed(u8).init(self.allocator);
- defer extra_data_buf.deinit();
// The extra data byte length must be able to fit within a u16.
- var limited_extra_data_writer = limitedWriter(extra_data_buf.writer(), std.math.maxInt(u16));
- const extra_data_writer = limited_extra_data_writer.writer();
+ var extra_data_buf: std.Io.Writer.Allocating = .init(self.allocator);
+ defer extra_data_buf.deinit();
for (control.extra_data) |data_expression| {
const data = try self.evaluateDataExpression(data_expression);
defer data.deinit(self.allocator);
- data.write(extra_data_writer) catch |err| switch (err) {
- error.NoSpaceLeft => {
- try self.addErrorDetails(.{
- .err = .control_extra_data_size_exceeds_max,
- .token = control.type,
- });
- return self.addErrorDetailsAndFail(.{
- .err = .control_extra_data_size_exceeds_max,
- .type = .note,
- .token = data_expression.getFirstToken(),
- .token_span_end = data_expression.getLastToken(),
- });
- },
- else => |e| return e,
- };
+ try data.write(&extra_data_buf.writer);
+
+ if (extra_data_buf.written().len > std.math.maxInt(u16)) {
+ try self.addErrorDetails(.{
+ .err = .control_extra_data_size_exceeds_max,
+ .token = control.type,
+ });
+ return self.addErrorDetailsAndFail(.{
+ .err = .control_extra_data_size_exceeds_max,
+ .type = .note,
+ .token = data_expression.getFirstToken(),
+ .token_span_end = data_expression.getLastToken(),
+ });
+ }
}
// We know the extra_data_buf size fits within a u16.
- const extra_data_size: u16 = @intCast(extra_data_buf.items.len);
+ const extra_data_size: u16 = @intCast(extra_data_buf.written().len);
try data_writer.writeInt(u16, extra_data_size, .little);
- try data_writer.writeAll(extra_data_buf.items);
+ try data_writer.writeAll(extra_data_buf.written());
}
- pub fn writeToolbar(self: *Compiler, node: *Node.Toolbar, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeToolbar(self: *Compiler, node: *Node.Toolbar, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- const data_writer = data_buffer.writer();
+ const data_writer = &data_buffer.writer;
const button_width = evaluateNumberExpression(node.button_width, self.source, self.input_code_pages);
const button_height = evaluateNumberExpression(node.button_height, self.source, self.input_code_pages);
@@ -2034,7 +2033,7 @@ pub const Compiler = struct {
}
}
- const data_size: u32 = @intCast(data_buffer.items.len);
+ const data_size: u32 = @intCast(data_buffer.written().len);
var header = try self.resourceHeader(node.id, node.type, .{
.data_size = data_size,
});
@@ -2044,7 +2043,7 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_size);
}
@@ -2056,7 +2055,7 @@ pub const Compiler = struct {
node: *Node.FontStatement,
};
- pub fn writeDialogFont(self: *Compiler, resource: ResourceType, values: FontStatementValues, writer: anytype) !void {
+ pub fn writeDialogFont(self: *Compiler, resource: ResourceType, values: FontStatementValues, writer: *std.Io.Writer) !void {
const node = values.node;
const point_size = evaluateNumberExpression(node.point_size, self.source, self.input_code_pages);
try writer.writeInt(u16, point_size.asWord(), .little);
@@ -2081,13 +2080,9 @@ pub const Compiler = struct {
try writer.writeAll(std.mem.sliceAsBytes(typeface[0 .. typeface.len + 1]));
}
- pub fn writeMenu(self: *Compiler, node: *Node.Menu, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeMenu(self: *Compiler, node: *Node.Menu, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- // The header's data length field is a u32 so limit the resource's data size so that
- // we know we can always specify the real size.
- var limited_writer = limitedWriter(data_buffer.writer(), std.math.maxInt(u32));
- const data_writer = limited_writer.writer();
const type_bytes = SourceBytes{
.slice = node.type.slice(self.source),
@@ -2096,21 +2091,15 @@ pub const Compiler = struct {
const resource = ResourceType.fromString(type_bytes);
std.debug.assert(resource == .menu or resource == .menuex);
- var adapted = data_writer.adaptToNewApi(&.{});
+ try self.writeMenuData(node, &data_buffer.writer, resource);
- self.writeMenuData(node, &adapted.new_interface, resource) catch |err| switch (err) {
- error.WriteFailed => {
- return self.addErrorDetailsAndFail(.{
- .err = .resource_data_size_exceeds_max,
- .token = node.id,
- });
- },
- else => |e| return e,
+ // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
+ const data_size: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
+ return self.addErrorDetailsAndFail(.{
+ .err = .resource_data_size_exceeds_max,
+ .token = node.id,
+ });
};
-
- // This intCast can't fail because the limitedWriter above guarantees that
- // we will never write more than maxInt(u32) bytes.
- const data_size: u32 = @intCast(data_buffer.items.len);
var header = try self.resourceHeader(node.id, node.type, .{
.data_size = data_size,
});
@@ -2121,7 +2110,7 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_size);
}
@@ -2264,13 +2253,11 @@ pub const Compiler = struct {
}
}
- pub fn writeVersionInfo(self: *Compiler, node: *Node.VersionInfo, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(self.allocator);
+ pub fn writeVersionInfo(self: *Compiler, node: *Node.VersionInfo, writer: *std.Io.Writer) !void {
+ // NOTE: The node's length field (which is inclusive of the length of all of its children) is a u16
+ var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
defer data_buffer.deinit();
- // The node's length field (which is inclusive of the length of all of its children) is a u16
- // so limit the node's data size so that we know we can always specify the real size.
- var limited_writer = limitedWriter(data_buffer.writer(), std.math.maxInt(u16));
- const data_writer = limited_writer.writer();
+ const data_writer = &data_buffer.writer;
try data_writer.writeInt(u16, 0, .little); // placeholder size
try data_writer.writeInt(u16, res.FixedFileInfo.byte_len, .little);
@@ -2354,29 +2341,32 @@ pub const Compiler = struct {
try fixed_file_info.write(data_writer);
for (node.block_statements) |statement| {
- var adapted = data_writer.adaptToNewApi(&.{});
- self.writeVersionNode(statement, &adapted.new_interface, &data_buffer) catch |err| switch (err) {
- error.WriteFailed => {
- try self.addErrorDetails(.{
- .err = .version_node_size_exceeds_max,
- .token = node.id,
- });
- return self.addErrorDetailsAndFail(.{
- .err = .version_node_size_exceeds_max,
- .type = .note,
- .token = statement.getFirstToken(),
- .token_span_end = statement.getLastToken(),
- });
+ var overflow = false;
+ self.writeVersionNode(statement, data_writer) catch |err| switch (err) {
+ error.NoSpaceLeft => {
+ overflow = true;
},
else => |e| return e,
};
+ if (overflow or data_buffer.written().len > std.math.maxInt(u16)) {
+ try self.addErrorDetails(.{
+ .err = .version_node_size_exceeds_max,
+ .token = node.id,
+ });
+ return self.addErrorDetailsAndFail(.{
+ .err = .version_node_size_exceeds_max,
+ .type = .note,
+ .token = statement.getFirstToken(),
+ .token_span_end = statement.getLastToken(),
+ });
+ }
}
- // We know that data_buffer.items.len is within the limits of a u16, since we
- // limited the writer to maxInt(u16)
- const data_size: u16 = @intCast(data_buffer.items.len);
+ // We know that data_buffer len is within the limits of a u16, since we check in the block
+ // statements loop above which is the only place it can overflow.
+ const data_size: u16 = @intCast(data_buffer.written().len);
// And now that we know the full size of this node (including its children), set its size
- std.mem.writeInt(u16, data_buffer.items[0..2], data_size, .little);
+ std.mem.writeInt(u16, data_buffer.written()[0..2], data_size, .little);
var header = try self.resourceHeader(node.id, node.versioninfo, .{
.data_size = data_size,
@@ -2387,22 +2377,21 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try writeResourceData(writer, &data_fbs, data_size);
}
- /// Expects writer to be a LimitedWriter limited to u16, meaning all writes to
- /// the writer within this function could return error.NoSpaceLeft, and that buf.items.len
- /// will never be able to exceed maxInt(u16).
- pub fn writeVersionNode(self: *Compiler, node: *Node, writer: *std.Io.Writer, buf: *std.array_list.Managed(u8)) !void {
+ /// Assumes that writer is Writer.Allocating (specifically, that buffered() gets the entire data)
+ /// TODO: This function could be nicer if writer was guaranteed to fail if it wrote more than u16 max bytes
+ pub fn writeVersionNode(self: *Compiler, node: *Node, writer: *std.Io.Writer) !void {
// We can assume that buf.items.len will never be able to exceed the limits of a u16
- try writeDataPadding(writer, @as(u16, @intCast(buf.items.len)));
+ try writeDataPadding(writer, std.math.cast(u16, writer.buffered().len) orelse return error.NoSpaceLeft);
- const node_and_children_size_offset = buf.items.len;
+ const node_and_children_size_offset = writer.buffered().len;
try writer.writeInt(u16, 0, .little); // placeholder for size
- const data_size_offset = buf.items.len;
+ const data_size_offset = writer.buffered().len;
try writer.writeInt(u16, 0, .little); // placeholder for data size
- const data_type_offset = buf.items.len;
+ const data_type_offset = writer.buffered().len;
// Data type is string unless the node contains values that are numbers.
try writer.writeInt(u16, res.VersionNode.type_string, .little);
@@ -2432,7 +2421,7 @@ pub const Compiler = struct {
// during parsing, so we can just do the correct thing here.
var values_size: usize = 0;
- try writeDataPadding(writer, @intCast(buf.items.len));
+ try writeDataPadding(writer, std.math.cast(u16, writer.buffered().len) orelse return error.NoSpaceLeft);
for (block_or_value.values, 0..) |value_value_node_uncasted, i| {
const value_value_node = value_value_node_uncasted.cast(.block_value_value).?;
@@ -2471,26 +2460,26 @@ pub const Compiler = struct {
}
}
}
- var data_size_slice = buf.items[data_size_offset..];
+ var data_size_slice = writer.buffered()[data_size_offset..];
std.mem.writeInt(u16, data_size_slice[0..@sizeOf(u16)], @as(u16, @intCast(values_size)), .little);
if (has_number_value) {
- const data_type_slice = buf.items[data_type_offset..];
+ const data_type_slice = writer.buffered()[data_type_offset..];
std.mem.writeInt(u16, data_type_slice[0..@sizeOf(u16)], res.VersionNode.type_binary, .little);
}
if (node_type == .block) {
const block = block_or_value;
for (block.children) |child| {
- try self.writeVersionNode(child, writer, buf);
+ try self.writeVersionNode(child, writer);
}
}
},
else => unreachable,
}
- const node_and_children_size = buf.items.len - node_and_children_size_offset;
- const node_and_children_size_slice = buf.items[node_and_children_size_offset..];
+ const node_and_children_size = writer.buffered().len - node_and_children_size_offset;
+ const node_and_children_size_slice = writer.buffered()[node_and_children_size_offset..];
std.mem.writeInt(u16, node_and_children_size_slice[0..@sizeOf(u16)], @as(u16, @intCast(node_and_children_size)), .little);
}
@@ -2683,11 +2672,11 @@ pub const Compiler = struct {
return .{ .bytes = header_size, .padding_after_name = padding_after_name };
}
- pub fn writeAssertNoOverflow(self: ResourceHeader, writer: anytype) !void {
+ pub fn writeAssertNoOverflow(self: ResourceHeader, writer: *std.Io.Writer) !void {
return self.writeSizeInfo(writer, self.calcSize() catch unreachable);
}
- pub fn write(self: ResourceHeader, writer: anytype, err_ctx: errors.DiagnosticsContext) !void {
+ pub fn write(self: ResourceHeader, writer: *std.Io.Writer, err_ctx: errors.DiagnosticsContext) !void {
const size_info = self.calcSize() catch {
try err_ctx.diagnostics.append(.{
.err = .resource_data_size_exceeds_max,
@@ -2825,7 +2814,7 @@ pub const Compiler = struct {
return null;
}
- pub fn writeEmptyResource(writer: anytype) !void {
+ pub fn writeEmptyResource(writer: *std.Io.Writer) !void {
const header = ResourceHeader{
.name_value = .{ .ordinal = 0 },
.type_value = .{ .ordinal = 0 },
@@ -2942,87 +2931,8 @@ pub const SearchDir = struct {
}
};
-/// Slurps the first `size` bytes read into `slurped_header`
-pub fn HeaderSlurpingReader(comptime size: usize, comptime ReaderType: anytype) type {
- return struct {
- child_reader: ReaderType,
- bytes_read: usize = 0,
- slurped_header: [size]u8 = [_]u8{0x00} ** size,
-
- pub const Error = ReaderType.Error;
- pub const Reader = std.io.GenericReader(*@This(), Error, read);
-
- pub fn read(self: *@This(), buf: []u8) Error!usize {
- const amt = try self.child_reader.read(buf);
- if (self.bytes_read < size) {
- const bytes_to_add = @min(amt, size - self.bytes_read);
- const end_index = self.bytes_read + bytes_to_add;
- @memcpy(self.slurped_header[self.bytes_read..end_index], buf[0..bytes_to_add]);
- }
- self.bytes_read +|= amt;
- return amt;
- }
-
- pub fn reader(self: *@This()) Reader {
- return .{ .context = self };
- }
- };
-}
-
-pub fn headerSlurpingReader(comptime size: usize, reader: anytype) HeaderSlurpingReader(size, @TypeOf(reader)) {
- return .{ .child_reader = reader };
-}
-
-/// Sort of like std.io.LimitedReader, but a Writer.
-/// Returns an error if writing the requested number of bytes
-/// would ever exceed bytes_left, i.e. it does not always
-/// write up to the limit and instead will error if the
-/// limit would be breached if the entire slice was written.
-pub fn LimitedWriter(comptime WriterType: type) type {
- return struct {
- inner_writer: WriterType,
- bytes_left: u64,
-
- pub const Error = error{NoSpaceLeft} || WriterType.Error;
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- const Self = @This();
-
- pub fn write(self: *Self, bytes: []const u8) Error!usize {
- if (bytes.len > self.bytes_left) return error.NoSpaceLeft;
- const amt = try self.inner_writer.write(bytes);
- self.bytes_left -= amt;
- return amt;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
- };
-}
-
-/// Returns an initialised `LimitedWriter`
-/// `bytes_left` is a `u64` to be able to take 64 bit file offsets
-pub fn limitedWriter(inner_writer: anytype, bytes_left: u64) LimitedWriter(@TypeOf(inner_writer)) {
- return .{ .inner_writer = inner_writer, .bytes_left = bytes_left };
-}
-
-test "limitedWriter basic usage" {
- var buf: [4]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&buf);
- var limited_stream = limitedWriter(fbs.writer(), 4);
- var writer = limited_stream.writer();
-
- try std.testing.expectEqual(@as(usize, 3), try writer.write("123"));
- try std.testing.expectEqualSlices(u8, "123", buf[0..3]);
- try std.testing.expectError(error.NoSpaceLeft, writer.write("45"));
- try std.testing.expectEqual(@as(usize, 1), try writer.write("4"));
- try std.testing.expectEqualSlices(u8, "1234", buf[0..4]);
- try std.testing.expectError(error.NoSpaceLeft, writer.write("5"));
-}
-
pub const FontDir = struct {
- fonts: std.ArrayListUnmanaged(Font) = .empty,
+ fonts: std.ArrayList(Font) = .empty,
/// To keep track of which ids are set and where they were set from
ids: std.AutoHashMapUnmanaged(u16, Token) = .empty,
@@ -3040,7 +2950,7 @@ pub const FontDir = struct {
try self.fonts.append(allocator, font);
}
- pub fn writeResData(self: *FontDir, compiler: *Compiler, writer: anytype) !void {
+ pub fn writeResData(self: *FontDir, compiler: *Compiler, writer: *std.Io.Writer) !void {
if (self.fonts.items.len == 0) return;
// We know the number of fonts is limited to maxInt(u16) because fonts
@@ -3164,7 +3074,7 @@ pub const StringTable = struct {
blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .empty,
pub const Block = struct {
- strings: std.ArrayListUnmanaged(Token) = .empty,
+ strings: std.ArrayList(Token) = .empty,
set_indexes: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 },
memory_flags: MemoryFlags = MemoryFlags.defaults(res.RT.STRING),
characteristics: u32,
@@ -3245,10 +3155,10 @@ pub const StringTable = struct {
try std.testing.expectEqualStrings("a", trimToDoubleNUL(u8, "a\x00\x00b"));
}
- pub fn writeResData(self: *Block, compiler: *Compiler, language: res.Language, block_id: u16, writer: anytype) !void {
- var data_buffer = std.array_list.Managed(u8).init(compiler.allocator);
+ pub fn writeResData(self: *Block, compiler: *Compiler, language: res.Language, block_id: u16, writer: *std.Io.Writer) !void {
+ var data_buffer: std.Io.Writer.Allocating = .init(compiler.allocator);
defer data_buffer.deinit();
- const data_writer = data_buffer.writer();
+ const data_writer = &data_buffer.writer;
var i: u8 = 0;
var string_i: u8 = 0;
@@ -3307,7 +3217,7 @@ pub const StringTable = struct {
// 16 * (131,070 + 2) = 2,097,152 which is well within the u32 max.
//
// Note: The string literal maximum length is enforced by the lexer.
- const data_size: u32 = @intCast(data_buffer.items.len);
+ const data_size: u32 = @intCast(data_buffer.written().len);
const header = Compiler.ResourceHeader{
.name_value = .{ .ordinal = block_id },
@@ -3322,7 +3232,7 @@ pub const StringTable = struct {
// we fully control and know are numbers, so they have a fixed size.
try header.writeAssertNoOverflow(writer);
- var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
+ var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
try Compiler.writeResourceData(writer, &data_fbs, data_size);
}
};
diff --git a/lib/compiler/resinator/cvtres.zig b/lib/compiler/resinator/cvtres.zig
index a375a1fffe..d0fb4c2d1c 100644
--- a/lib/compiler/resinator/cvtres.zig
+++ b/lib/compiler/resinator/cvtres.zig
@@ -43,7 +43,7 @@ pub const Resource = struct {
};
pub const ParsedResources = struct {
- list: std.ArrayListUnmanaged(Resource) = .empty,
+ list: std.ArrayList(Resource) = .empty,
allocator: Allocator,
pub fn init(allocator: Allocator) ParsedResources {
@@ -157,7 +157,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrO
const ordinal_value = try reader.takeInt(u16, .little);
return .{ .ordinal = ordinal_value };
}
- var name_buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 16);
+ var name_buf = try std.ArrayList(u16).initCapacity(allocator, 16);
errdefer name_buf.deinit(allocator);
var code_unit = first_code_unit;
while (code_unit != 0) {
@@ -373,7 +373,7 @@ pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []cons
try writer.writeAll(string_table.bytes.items);
}
-fn writeSymbol(writer: anytype, symbol: std.coff.Symbol) !void {
+fn writeSymbol(writer: *std.Io.Writer, symbol: std.coff.Symbol) !void {
try writer.writeAll(&symbol.name);
try writer.writeInt(u32, symbol.value, .little);
try writer.writeInt(u16, @intFromEnum(symbol.section_number), .little);
@@ -383,7 +383,7 @@ fn writeSymbol(writer: anytype, symbol: std.coff.Symbol) !void {
try writer.writeInt(u8, symbol.number_of_aux_symbols, .little);
}
-fn writeSectionDefinition(writer: anytype, def: std.coff.SectionDefinition) !void {
+fn writeSectionDefinition(writer: *std.Io.Writer, def: std.coff.SectionDefinition) !void {
try writer.writeInt(u32, def.length, .little);
try writer.writeInt(u16, def.number_of_relocations, .little);
try writer.writeInt(u16, def.number_of_linenumbers, .little);
@@ -417,7 +417,7 @@ pub const ResourceDirectoryEntry = extern struct {
to_subdirectory: bool,
},
- pub fn writeCoff(self: ResourceDirectoryEntry, writer: anytype) !void {
+ pub fn writeCoff(self: ResourceDirectoryEntry, writer: *std.Io.Writer) !void {
try writer.writeInt(u32, @bitCast(self.entry), .little);
try writer.writeInt(u32, @bitCast(self.offset), .little);
}
@@ -435,7 +435,7 @@ const ResourceTree = struct {
type_to_name_map: std.ArrayHashMapUnmanaged(NameOrOrdinal, NameToLanguageMap, NameOrOrdinalHashContext, true),
rsrc_string_table: std.ArrayHashMapUnmanaged(NameOrOrdinal, void, NameOrOrdinalHashContext, true),
deduplicated_data: std.StringArrayHashMapUnmanaged(u32),
- data_offsets: std.ArrayListUnmanaged(u32),
+ data_offsets: std.ArrayList(u32),
rsrc02_len: u32,
coff_options: CoffOptions,
allocator: Allocator,
@@ -675,13 +675,13 @@ const ResourceTree = struct {
return &.{};
}
- var level2_list: std.ArrayListUnmanaged(*const NameToLanguageMap) = .empty;
+ var level2_list: std.ArrayList(*const NameToLanguageMap) = .empty;
defer level2_list.deinit(allocator);
- var level3_list: std.ArrayListUnmanaged(*const LanguageToResourceMap) = .empty;
+ var level3_list: std.ArrayList(*const LanguageToResourceMap) = .empty;
defer level3_list.deinit(allocator);
- var resources_list: std.ArrayListUnmanaged(*const RelocatableResource) = .empty;
+ var resources_list: std.ArrayList(*const RelocatableResource) = .empty;
defer resources_list.deinit(allocator);
var relocations = Relocations.init(allocator);
@@ -896,7 +896,7 @@ const ResourceTree = struct {
return symbols;
}
- fn writeRelocation(writer: anytype, relocation: std.coff.Relocation) !void {
+ fn writeRelocation(writer: *std.Io.Writer, relocation: std.coff.Relocation) !void {
try writer.writeInt(u32, relocation.virtual_address, .little);
try writer.writeInt(u32, relocation.symbol_table_index, .little);
try writer.writeInt(u16, relocation.type, .little);
@@ -928,7 +928,7 @@ const Relocation = struct {
const Relocations = struct {
allocator: Allocator,
- list: std.ArrayListUnmanaged(Relocation) = .empty,
+ list: std.ArrayList(Relocation) = .empty,
cur_symbol_index: u32 = 5,
pub fn init(allocator: Allocator) Relocations {
@@ -952,7 +952,7 @@ const Relocations = struct {
/// Does not do deduplication (only because there's no chance of duplicate strings in this
/// instance).
const StringTable = struct {
- bytes: std.ArrayListUnmanaged(u8) = .empty,
+ bytes: std.ArrayList(u8) = .empty,
pub fn deinit(self: *StringTable, allocator: Allocator) void {
self.bytes.deinit(allocator);
diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig
index 4bc443c4e7..90a15bca72 100644
--- a/lib/compiler/resinator/errors.zig
+++ b/lib/compiler/resinator/errors.zig
@@ -15,10 +15,10 @@ const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
pub const Diagnostics = struct {
- errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
+ errors: std.ArrayList(ErrorDetails) = .empty,
/// Append-only, cannot handle removing strings.
/// Expects to own all strings within the list.
- strings: std.ArrayListUnmanaged([]const u8) = .empty,
+ strings: std.ArrayList([]const u8) = .empty,
allocator: std.mem.Allocator,
pub fn init(allocator: std.mem.Allocator) Diagnostics {
@@ -256,7 +256,7 @@ pub const ErrorDetails = struct {
.{ "literal", "unquoted literal" },
});
- pub fn writeCommaSeparated(self: ExpectedTypes, writer: anytype) !void {
+ pub fn writeCommaSeparated(self: ExpectedTypes, writer: *std.Io.Writer) !void {
const struct_info = @typeInfo(ExpectedTypes).@"struct";
const num_real_fields = struct_info.fields.len - 1;
const num_padding_bits = @bitSizeOf(ExpectedTypes) - num_real_fields;
@@ -441,7 +441,7 @@ pub const ErrorDetails = struct {
} };
}
- pub fn render(self: ErrorDetails, writer: anytype, source: []const u8, strings: []const []const u8) !void {
+ pub fn render(self: ErrorDetails, writer: *std.Io.Writer, source: []const u8, strings: []const []const u8) !void {
switch (self.err) {
.unfinished_string_literal => {
return writer.print("unfinished string literal at '{f}', expected closing '\"'", .{self.fmtToken(source)});
@@ -987,12 +987,14 @@ pub fn renderErrorMessage(writer: *std.io.Writer, tty_config: std.io.tty.Config,
if (corresponding_span != null and corresponding_file != null) {
var worth_printing_lines: bool = true;
var initial_lines_err: ?anyerror = null;
+ var file_reader_buf: [max_source_line_bytes * 2]u8 = undefined;
var corresponding_lines: ?CorrespondingLines = CorrespondingLines.init(
cwd,
err_details,
source_line_for_display.line,
corresponding_span.?,
corresponding_file.?,
+ &file_reader_buf,
) catch |err| switch (err) {
error.NotWorthPrintingLines => blk: {
worth_printing_lines = false;
@@ -1078,10 +1080,17 @@ const CorrespondingLines = struct {
at_eof: bool = false,
span: SourceMappings.CorrespondingSpan,
file: std.fs.File,
- buffered_reader: std.fs.File.Reader,
+ file_reader: std.fs.File.Reader,
code_page: SupportedCodePage,
- pub fn init(cwd: std.fs.Dir, err_details: ErrorDetails, line_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines {
+ pub fn init(
+ cwd: std.fs.Dir,
+ err_details: ErrorDetails,
+ line_for_comparison: []const u8,
+ corresponding_span: SourceMappings.CorrespondingSpan,
+ corresponding_file: []const u8,
+ file_reader_buf: []u8,
+ ) !CorrespondingLines {
// We don't do line comparison for this error, so don't print the note if the line
// number is different
if (err_details.err == .string_literal_too_long and err_details.token.line_number != corresponding_span.start_line) {
@@ -1096,18 +1105,14 @@ const CorrespondingLines = struct {
var corresponding_lines = CorrespondingLines{
.span = corresponding_span,
.file = try utils.openFileNotDir(cwd, corresponding_file, .{}),
- .buffered_reader = undefined,
.code_page = err_details.code_page,
+ .file_reader = undefined,
};
- corresponding_lines.buffered_reader = corresponding_lines.file.reader(&.{});
+ corresponding_lines.file_reader = corresponding_lines.file.reader(file_reader_buf);
errdefer corresponding_lines.deinit();
- var fbs = std.io.fixedBufferStream(&corresponding_lines.line_buf);
- const writer = fbs.writer();
-
try corresponding_lines.writeLineFromStreamVerbatim(
- writer,
- corresponding_lines.buffered_reader.interface.adaptToOldInterface(),
+ &corresponding_lines.file_reader.interface,
corresponding_span.start_line,
);
@@ -1145,12 +1150,8 @@ const CorrespondingLines = struct {
self.line_len = 0;
self.visual_line_len = 0;
- var fbs = std.io.fixedBufferStream(&self.line_buf);
- const writer = fbs.writer();
-
try self.writeLineFromStreamVerbatim(
- writer,
- self.buffered_reader.interface.adaptToOldInterface(),
+ &self.file_reader.interface,
self.line_num,
);
@@ -1164,7 +1165,7 @@ const CorrespondingLines = struct {
return visual_line;
}
- fn writeLineFromStreamVerbatim(self: *CorrespondingLines, writer: anytype, input: anytype, line_num: usize) !void {
+ fn writeLineFromStreamVerbatim(self: *CorrespondingLines, input: *std.Io.Reader, line_num: usize) !void {
while (try readByteOrEof(input)) |byte| {
switch (byte) {
'\n', '\r' => {
@@ -1184,13 +1185,9 @@ const CorrespondingLines = struct {
}
},
else => {
- if (self.line_num == line_num) {
- if (writer.writeByte(byte)) {
- self.line_len += 1;
- } else |err| switch (err) {
- error.NoSpaceLeft => {},
- else => |e| return e,
- }
+ if (self.line_num == line_num and self.line_len < self.line_buf.len) {
+ self.line_buf[self.line_len] = byte;
+ self.line_len += 1;
}
},
}
@@ -1201,8 +1198,8 @@ const CorrespondingLines = struct {
self.line_num += 1;
}
- fn readByteOrEof(reader: anytype) !?u8 {
- return reader.readByte() catch |err| switch (err) {
+ fn readByteOrEof(reader: *std.Io.Reader) !?u8 {
+ return reader.takeByte() catch |err| switch (err) {
error.EndOfStream => return null,
else => |e| return e,
};
diff --git a/lib/compiler/resinator/ico.zig b/lib/compiler/resinator/ico.zig
index bf8883a4c9..dc19f1ed8c 100644
--- a/lib/compiler/resinator/ico.zig
+++ b/lib/compiler/resinator/ico.zig
@@ -8,80 +8,66 @@ const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
-pub const ReadError = std.mem.Allocator.Error || error{ InvalidHeader, InvalidImageType, ImpossibleDataSize, UnexpectedEOF, ReadError };
-
-pub fn read(allocator: std.mem.Allocator, reader: anytype, max_size: u64) ReadError!IconDir {
- // Some Reader implementations have an empty ReadError error set which would
- // cause 'unreachable else' if we tried to use an else in the switch, so we
- // need to detect this case and not try to translate to ReadError
- const anyerror_reader_errorset = @TypeOf(reader).Error == anyerror;
- const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).error_set == null or @typeInfo(@TypeOf(reader).Error).error_set.?.len == 0;
- if (empty_reader_errorset and !anyerror_reader_errorset) {
- return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
- error.EndOfStream => error.UnexpectedEOF,
- else => |e| return e,
- };
- } else {
- return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
- error.OutOfMemory,
- error.InvalidHeader,
- error.InvalidImageType,
- error.ImpossibleDataSize,
- => |e| return e,
- error.EndOfStream => error.UnexpectedEOF,
- // The remaining errors are dependent on the `reader`, so
- // we just translate them all to generic ReadError
- else => error.ReadError,
- };
- }
+pub const ReadError = std.mem.Allocator.Error || error{ InvalidHeader, InvalidImageType, ImpossibleDataSize, UnexpectedEOF, ReadFailed };
+
+pub fn read(allocator: std.mem.Allocator, reader: *std.Io.Reader, max_size: u64) ReadError!IconDir {
+ return readInner(allocator, reader, max_size) catch |err| switch (err) {
+ error.OutOfMemory,
+ error.InvalidHeader,
+ error.InvalidImageType,
+ error.ImpossibleDataSize,
+ error.ReadFailed,
+ => |e| return e,
+ error.EndOfStream => error.UnexpectedEOF,
+ };
}
// TODO: This seems like a somewhat strange pattern, could be a better way
// to do this. Maybe it makes more sense to handle the translation
// at the call site instead of having a helper function here.
-pub fn readAnyError(allocator: std.mem.Allocator, reader: anytype, max_size: u64) !IconDir {
- const reserved = try reader.readInt(u16, .little);
+fn readInner(allocator: std.mem.Allocator, reader: *std.Io.Reader, max_size: u64) !IconDir {
+ const reserved = try reader.takeInt(u16, .little);
if (reserved != 0) {
return error.InvalidHeader;
}
- const image_type = reader.readEnum(ImageType, .little) catch |err| switch (err) {
- error.InvalidValue => return error.InvalidImageType,
+ const image_type = reader.takeEnum(ImageType, .little) catch |err| switch (err) {
+ error.InvalidEnumTag => return error.InvalidImageType,
else => |e| return e,
};
- const num_images = try reader.readInt(u16, .little);
+ const num_images = try reader.takeInt(u16, .little);
// To avoid over-allocation in the case of a file that says it has way more
// entries than it actually does, we use an ArrayList with a conservatively
// limited initial capacity instead of allocating the entire slice at once.
const initial_capacity = @min(num_images, 8);
- var entries = try std.array_list.Managed(Entry).initCapacity(allocator, initial_capacity);
- errdefer entries.deinit();
+ var entries = try std.ArrayList(Entry).initCapacity(allocator, initial_capacity);
+ errdefer entries.deinit(allocator);
var i: usize = 0;
while (i < num_images) : (i += 1) {
var entry: Entry = undefined;
- entry.width = try reader.readByte();
- entry.height = try reader.readByte();
- entry.num_colors = try reader.readByte();
- entry.reserved = try reader.readByte();
+ entry.width = try reader.takeByte();
+ entry.height = try reader.takeByte();
+ entry.num_colors = try reader.takeByte();
+ entry.reserved = try reader.takeByte();
switch (image_type) {
.icon => {
entry.type_specific_data = .{ .icon = .{
- .color_planes = try reader.readInt(u16, .little),
- .bits_per_pixel = try reader.readInt(u16, .little),
+ .color_planes = try reader.takeInt(u16, .little),
+ .bits_per_pixel = try reader.takeInt(u16, .little),
} };
},
.cursor => {
entry.type_specific_data = .{ .cursor = .{
- .hotspot_x = try reader.readInt(u16, .little),
- .hotspot_y = try reader.readInt(u16, .little),
+ .hotspot_x = try reader.takeInt(u16, .little),
+ .hotspot_y = try reader.takeInt(u16, .little),
} };
},
}
- entry.data_size_in_bytes = try reader.readInt(u32, .little);
- entry.data_offset_from_start_of_file = try reader.readInt(u32, .little);
+ entry.data_size_in_bytes = try reader.takeInt(u32, .little);
+ entry.data_offset_from_start_of_file = try reader.takeInt(u32, .little);
// Validate that the offset/data size is feasible
if (@as(u64, entry.data_offset_from_start_of_file) + entry.data_size_in_bytes > max_size) {
return error.ImpossibleDataSize;
@@ -101,12 +87,12 @@ pub fn readAnyError(allocator: std.mem.Allocator, reader: anytype, max_size: u64
if (entry.data_size_in_bytes < 16) {
return error.ImpossibleDataSize;
}
- try entries.append(entry);
+ try entries.append(allocator, entry);
}
return .{
.image_type = image_type,
- .entries = try entries.toOwnedSlice(),
+ .entries = try entries.toOwnedSlice(allocator),
.allocator = allocator,
};
}
@@ -135,7 +121,7 @@ pub const IconDir = struct {
return @intCast(IconDir.res_header_byte_len + self.entries.len * Entry.res_byte_len);
}
- pub fn writeResData(self: IconDir, writer: anytype, first_image_id: u16) !void {
+ pub fn writeResData(self: IconDir, writer: *std.Io.Writer, first_image_id: u16) !void {
try writer.writeInt(u16, 0, .little);
try writer.writeInt(u16, @intFromEnum(self.image_type), .little);
// We know that entries.len must fit into a u16
@@ -173,7 +159,7 @@ pub const Entry = struct {
pub const res_byte_len = 14;
- pub fn writeResData(self: Entry, writer: anytype, id: u16) !void {
+ pub fn writeResData(self: Entry, writer: *std.Io.Writer, id: u16) !void {
switch (self.type_specific_data) {
.icon => |icon_data| {
try writer.writeInt(u8, @as(u8, @truncate(self.width)), .little);
@@ -198,8 +184,8 @@ pub const Entry = struct {
test "icon" {
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
- var fbs = std.io.fixedBufferStream(data);
- const icon = try read(std.testing.allocator, fbs.reader(), data.len);
+ var fbs: std.Io.Reader = .fixed(data);
+ const icon = try read(std.testing.allocator, &fbs, data.len);
defer icon.deinit();
try std.testing.expectEqual(ImageType.icon, icon.image_type);
@@ -211,26 +197,26 @@ test "icon too many images" {
// it's not possible to hit EOF when looking for more RESDIR structures, since they are
// themselves 16 bytes long, so we'll always hit ImpossibleDataSize instead.
const data = "\x00\x00\x01\x00\x02\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
- var fbs = std.io.fixedBufferStream(data);
- try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
+ var fbs: std.Io.Reader = .fixed(data);
+ try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
}
test "icon data size past EOF" {
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x01\x00\x00\x16\x00\x00\x00" ++ [_]u8{0} ** 16;
- var fbs = std.io.fixedBufferStream(data);
- try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
+ var fbs: std.Io.Reader = .fixed(data);
+ try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
}
test "icon data offset past EOF" {
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x10\x00\x00\x00\x17\x00\x00\x00" ++ [_]u8{0} ** 16;
- var fbs = std.io.fixedBufferStream(data);
- try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
+ var fbs: std.Io.Reader = .fixed(data);
+ try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
}
test "icon data size too small" {
const data = "\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x10\x00\x0F\x00\x00\x00\x16\x00\x00\x00";
- var fbs = std.io.fixedBufferStream(data);
- try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, fbs.reader(), data.len));
+ var fbs: std.Io.Reader = .fixed(data);
+ try std.testing.expectError(error.ImpossibleDataSize, read(std.testing.allocator, &fbs, data.len));
}
pub const ImageFormat = enum(u2) {
diff --git a/lib/compiler/resinator/lang.zig b/lib/compiler/resinator/lang.zig
index 769733ceba..c3ddc32fb2 100644
--- a/lib/compiler/resinator/lang.zig
+++ b/lib/compiler/resinator/lang.zig
@@ -119,6 +119,7 @@ test tagToId {
}
test "exhaustive tagToId" {
+ @setEvalBranchQuota(2000);
inline for (@typeInfo(LanguageId).@"enum".fields) |field| {
const id = tagToId(field.name) catch |err| {
std.debug.print("tag: {s}\n", .{field.name});
@@ -131,8 +132,8 @@ test "exhaustive tagToId" {
}
var buf: [32]u8 = undefined;
inline for (valid_alternate_sorts) |parsed_sort| {
- var fbs = std.io.fixedBufferStream(&buf);
- const writer = fbs.writer();
+ var fbs: std.Io.Writer = .fixed(&buf);
+ const writer = &fbs;
writer.writeAll(parsed_sort.language_code) catch unreachable;
writer.writeAll("-") catch unreachable;
writer.writeAll(parsed_sort.country_code.?) catch unreachable;
@@ -146,12 +147,12 @@ test "exhaustive tagToId" {
break :field name_buf;
};
const expected = @field(LanguageId, &expected_field_name);
- const id = tagToId(fbs.getWritten()) catch |err| {
- std.debug.print("tag: {s}\n", .{fbs.getWritten()});
+ const id = tagToId(fbs.buffered()) catch |err| {
+ std.debug.print("tag: {s}\n", .{fbs.buffered()});
return err;
};
try std.testing.expectEqual(expected, id orelse {
- std.debug.print("tag: {s}, expected: {}, got null\n", .{ fbs.getWritten(), expected });
+ std.debug.print("tag: {s}, expected: {}, got null\n", .{ fbs.buffered(), expected });
return error.TestExpectedEqual;
});
}
diff --git a/lib/compiler/resinator/literals.zig b/lib/compiler/resinator/literals.zig
index bdf4f882fb..c994d5fd89 100644
--- a/lib/compiler/resinator/literals.zig
+++ b/lib/compiler/resinator/literals.zig
@@ -469,8 +469,8 @@ pub fn parseQuotedString(
const T = if (literal_type == .ascii) u8 else u16;
std.debug.assert(bytes.slice.len >= 2); // must at least have 2 double quote chars
- var buf = try std.array_list.Managed(T).initCapacity(allocator, bytes.slice.len);
- errdefer buf.deinit();
+ var buf = try std.ArrayList(T).initCapacity(allocator, bytes.slice.len);
+ errdefer buf.deinit(allocator);
var iterative_parser = IterativeStringParser.init(bytes, options);
@@ -480,13 +480,13 @@ pub fn parseQuotedString(
.ascii => switch (options.output_code_page) {
.windows1252 => {
if (parsed.from_escaped_integer) {
- try buf.append(@truncate(c));
+ try buf.append(allocator, @truncate(c));
} else if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
- try buf.append(best_fit);
+ try buf.append(allocator, best_fit);
} else if (c < 0x10000 or c == code_pages.Codepoint.invalid) {
- try buf.append('?');
+ try buf.append(allocator, '?');
} else {
- try buf.appendSlice("??");
+ try buf.appendSlice(allocator, "??");
}
},
.utf8 => {
@@ -500,35 +500,35 @@ pub fn parseQuotedString(
}
var utf8_buf: [4]u8 = undefined;
const utf8_len = std.unicode.utf8Encode(codepoint_to_encode, &utf8_buf) catch unreachable;
- try buf.appendSlice(utf8_buf[0..utf8_len]);
+ try buf.appendSlice(allocator, utf8_buf[0..utf8_len]);
},
},
.wide => {
// Parsing any string type as a wide string is handled separately, see parseQuotedStringAsWideString
std.debug.assert(iterative_parser.declared_string_type == .wide);
if (parsed.from_escaped_integer) {
- try buf.append(std.mem.nativeToLittle(u16, @truncate(c)));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, @truncate(c)));
} else if (c == code_pages.Codepoint.invalid) {
- try buf.append(std.mem.nativeToLittle(u16, '�'));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
} else if (c < 0x10000) {
const short: u16 = @intCast(c);
- try buf.append(std.mem.nativeToLittle(u16, short));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, short));
} else {
if (!parsed.escaped_surrogate_pair) {
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
- try buf.append(std.mem.nativeToLittle(u16, high));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, high));
}
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
- try buf.append(std.mem.nativeToLittle(u16, low));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, low));
}
},
}
}
if (literal_type == .wide) {
- return buf.toOwnedSliceSentinel(0);
+ return buf.toOwnedSliceSentinel(allocator, 0);
} else {
- return buf.toOwnedSlice();
+ return buf.toOwnedSlice(allocator);
}
}
@@ -564,8 +564,8 @@ pub fn parseQuotedStringAsWideString(allocator: std.mem.Allocator, bytes: Source
// Note: We're only handling the case of parsing an ASCII string into a wide string from here on out.
// TODO: The logic below is similar to that in AcceleratorKeyCodepointTranslator, might be worth merging the two
- var buf = try std.array_list.Managed(u16).initCapacity(allocator, bytes.slice.len);
- errdefer buf.deinit();
+ var buf = try std.ArrayList(u16).initCapacity(allocator, bytes.slice.len);
+ errdefer buf.deinit(allocator);
var iterative_parser = IterativeStringParser.init(bytes, options);
@@ -578,23 +578,23 @@ pub fn parseQuotedStringAsWideString(allocator: std.mem.Allocator, bytes: Source
.windows1252 => windows1252.toCodepoint(byte_to_interpret),
.utf8 => if (byte_to_interpret > 0x7F) '�' else byte_to_interpret,
};
- try buf.append(std.mem.nativeToLittle(u16, code_unit_to_encode));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, code_unit_to_encode));
} else if (c == code_pages.Codepoint.invalid) {
- try buf.append(std.mem.nativeToLittle(u16, '�'));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
} else if (c < 0x10000) {
const short: u16 = @intCast(c);
- try buf.append(std.mem.nativeToLittle(u16, short));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, short));
} else {
if (!parsed.escaped_surrogate_pair) {
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
- try buf.append(std.mem.nativeToLittle(u16, high));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, high));
}
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
- try buf.append(std.mem.nativeToLittle(u16, low));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, low));
}
}
- return buf.toOwnedSliceSentinel(0);
+ return buf.toOwnedSliceSentinel(allocator, 0);
}
test "parse quoted ascii string" {
diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig
index 6e7315e771..0ab3841767 100644
--- a/lib/compiler/resinator/main.zig
+++ b/lib/compiler/resinator/main.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const removeComments = @import("comments.zig").removeComments;
const parseAndRemoveLineCommands = @import("source_mapping.zig").parseAndRemoveLineCommands;
const compile = @import("compile.zig").compile;
+const Dependencies = @import("compile.zig").Dependencies;
const Diagnostics = @import("errors.zig").Diagnostics;
const cli = @import("cli.zig");
const preprocess = @import("preprocess.zig");
@@ -13,8 +14,6 @@ const hasDisjointCodePage = @import("disjoint_code_page.zig").hasDisjointCodePag
const fmtResourceType = @import("res.zig").NameOrOrdinal.fmtResourceType;
const aro = @import("aro");
-var stdout_buffer: [1024]u8 = undefined;
-
pub fn main() !void {
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
defer std.debug.assert(gpa.deinit() == .ok);
@@ -43,11 +42,13 @@ pub fn main() !void {
cli_args = args[3..];
}
- var stdout_writer2 = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_buffer: [1024]u8 = undefined;
+ var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ const stdout = &stdout_writer.interface;
var error_handler: ErrorHandler = switch (zig_integration) {
true => .{
.server = .{
- .out = &stdout_writer2.interface,
+ .out = stdout,
.in = undefined, // won't be receiving messages
},
},
@@ -83,28 +84,23 @@ pub fn main() !void {
defer options.deinit();
if (options.print_help_and_exit) {
- const stdout = std.fs.File.stdout();
- try cli.writeUsage(stdout.deprecatedWriter(), "zig rc");
+ try cli.writeUsage(stdout, "zig rc");
+ try stdout.flush();
return;
}
// Don't allow verbose when integrating with Zig via stdout
options.verbose = false;
- const stdout_writer = std.fs.File.stdout().deprecatedWriter();
if (options.verbose) {
- try options.dumpVerbose(stdout_writer);
- try stdout_writer.writeByte('\n');
+ try options.dumpVerbose(stdout);
+ try stdout.writeByte('\n');
+ try stdout.flush();
}
- var dependencies_list = std.array_list.Managed([]const u8).init(allocator);
- defer {
- for (dependencies_list.items) |item| {
- allocator.free(item);
- }
- dependencies_list.deinit();
- }
- const maybe_dependencies_list: ?*std.array_list.Managed([]const u8) = if (options.depfile_path != null) &dependencies_list else null;
+ var dependencies = Dependencies.init(allocator);
+ defer dependencies.deinit();
+ const maybe_dependencies: ?*Dependencies = if (options.depfile_path != null) &dependencies else null;
var include_paths = LazyIncludePaths{
.arena = arena,
@@ -115,7 +111,7 @@ pub fn main() !void {
const full_input = full_input: {
if (options.input_format == .rc and options.preprocess != .no) {
- var preprocessed_buf = std.array_list.Managed(u8).init(allocator);
+ var preprocessed_buf: std.Io.Writer.Allocating = .init(allocator);
errdefer preprocessed_buf.deinit();
// We're going to throw away everything except the final preprocessed output anyway,
@@ -127,26 +123,27 @@ pub fn main() !void {
var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
defer comp.deinit();
- var argv = std.array_list.Managed([]const u8).init(comp.gpa);
- defer argv.deinit();
+ var argv: std.ArrayList([]const u8) = .empty;
+ defer argv.deinit(aro_arena);
- try argv.append("arocc"); // dummy command name
+ try argv.append(aro_arena, "arocc"); // dummy command name
const resolved_include_paths = try include_paths.get(&error_handler);
try preprocess.appendAroArgs(aro_arena, &argv, options, resolved_include_paths);
- try argv.append(switch (options.input_source) {
+ try argv.append(aro_arena, switch (options.input_source) {
.stdio => "-",
.filename => |filename| filename,
});
if (options.verbose) {
- try stdout_writer.writeAll("Preprocessor: arocc (built-in)\n");
+ try stdout.writeAll("Preprocessor: arocc (built-in)\n");
for (argv.items[0 .. argv.items.len - 1]) |arg| {
- try stdout_writer.print("{s} ", .{arg});
+ try stdout.print("{s} ", .{arg});
}
- try stdout_writer.print("{s}\n\n", .{argv.items[argv.items.len - 1]});
+ try stdout.print("{s}\n\n", .{argv.items[argv.items.len - 1]});
+ try stdout.flush();
}
- preprocess.preprocess(&comp, preprocessed_buf.writer(), argv.items, maybe_dependencies_list) catch |err| switch (err) {
+ preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) {
error.GeneratedSourceError => {
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug):", &comp);
std.process.exit(1);
@@ -249,14 +246,15 @@ pub fn main() !void {
defer diagnostics.deinit();
var output_buffer: [4096]u8 = undefined;
- var res_stream_writer = res_stream.source.writer(allocator).adaptToNewApi(&output_buffer);
- const output_buffered_stream = &res_stream_writer.new_interface;
+ var res_stream_writer = res_stream.source.writer(allocator, &output_buffer);
+ defer res_stream_writer.deinit(&res_stream.source);
+ const output_buffered_stream = res_stream_writer.interface();
compile(allocator, final_input, output_buffered_stream, .{
.cwd = std.fs.cwd(),
.diagnostics = &diagnostics,
.source_mappings = &mapping_results.mappings,
- .dependencies_list = maybe_dependencies_list,
+ .dependencies = maybe_dependencies,
.ignore_include_env_var = options.ignore_include_env_var,
.extra_include_paths = options.extra_include_paths.items,
.system_include_paths = try include_paths.get(&error_handler),
@@ -303,7 +301,7 @@ pub fn main() !void {
};
try write_stream.beginArray();
- for (dependencies_list.items) |dep_path| {
+ for (dependencies.list.items) |dep_path| {
try write_stream.write(dep_path);
}
try write_stream.endArray();
@@ -342,10 +340,10 @@ pub fn main() !void {
defer coff_stream.deinit(allocator);
var coff_output_buffer: [4096]u8 = undefined;
- var coff_output_buffered_stream = coff_stream.source.writer(allocator).adaptToNewApi(&coff_output_buffer);
+ var coff_output_buffered_stream = coff_stream.source.writer(allocator, &coff_output_buffer);
var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
- cvtres.writeCoff(allocator, &coff_output_buffered_stream.new_interface, resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
+ cvtres.writeCoff(allocator, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
switch (err) {
error.DuplicateResource => {
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
@@ -382,7 +380,7 @@ pub fn main() !void {
std.process.exit(1);
};
- try coff_output_buffered_stream.new_interface.flush();
+ try coff_output_buffered_stream.interface().flush();
}
const IoStream = struct {
@@ -425,7 +423,7 @@ const IoStream = struct {
pub const Source = union(enum) {
file: std.fs.File,
stdio: std.fs.File,
- memory: std.ArrayListUnmanaged(u8),
+ memory: std.ArrayList(u8),
/// The source has been closed and any usage of the Source in this state is illegal (except deinit).
closed: void,
@@ -472,26 +470,34 @@ const IoStream = struct {
};
}
- pub const WriterContext = struct {
- self: *Source,
- allocator: std.mem.Allocator,
- };
- pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
- pub const Writer = std.io.GenericWriter(WriterContext, WriteError, write);
-
- pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
- switch (ctx.self.*) {
- inline .file, .stdio => |file| return file.write(bytes),
- .memory => |*list| {
- try list.appendSlice(ctx.allocator, bytes);
- return bytes.len;
- },
- .closed => unreachable,
+ pub const Writer = union(enum) {
+ file: std.fs.File.Writer,
+ allocating: std.Io.Writer.Allocating,
+
+ pub const Error = std.mem.Allocator.Error || std.fs.File.WriteError;
+
+ pub fn interface(this: *@This()) *std.Io.Writer {
+ return switch (this.*) {
+ .file => |*fw| &fw.interface,
+ .allocating => |*a| &a.writer,
+ };
}
- }
- pub fn writer(self: *Source, allocator: std.mem.Allocator) Writer {
- return .{ .context = .{ .self = self, .allocator = allocator } };
+ pub fn deinit(this: *@This(), source: *Source) void {
+ switch (this.*) {
+ .file => {},
+ .allocating => |*a| source.memory = a.toArrayList(),
+ }
+ this.* = undefined;
+ }
+ };
+
+ pub fn writer(source: *Source, allocator: std.mem.Allocator, buffer: []u8) Writer {
+ return switch (source.*) {
+ .file, .stdio => |file| .{ .file = file.writer(buffer) },
+ .memory => |*list| .{ .allocating = .fromArrayList(allocator, list) },
+ .closed => unreachable,
+ };
}
};
};
@@ -721,7 +727,7 @@ fn cliDiagnosticsToErrorBundle(
});
var cur_err: ?ErrorBundle.ErrorMessage = null;
- var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
+ var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
defer cur_notes.deinit(gpa);
for (diagnostics.errors.items) |err_details| {
switch (err_details.type) {
@@ -763,10 +769,10 @@ fn diagnosticsToErrorBundle(
try bundle.init(gpa);
errdefer bundle.deinit();
- var msg_buf: std.ArrayListUnmanaged(u8) = .empty;
- defer msg_buf.deinit(gpa);
+ var msg_buf: std.Io.Writer.Allocating = .init(gpa);
+ defer msg_buf.deinit();
var cur_err: ?ErrorBundle.ErrorMessage = null;
- var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
+ var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
defer cur_notes.deinit(gpa);
for (diagnostics.errors.items) |err_details| {
switch (err_details.type) {
@@ -789,7 +795,7 @@ fn diagnosticsToErrorBundle(
const column = err_details.token.calculateColumn(source, 1, source_line_start) + 1;
msg_buf.clearRetainingCapacity();
- try err_details.render(msg_buf.writer(gpa), source, diagnostics.strings.items);
+ try err_details.render(&msg_buf.writer, source, diagnostics.strings.items);
const src_loc = src_loc: {
var src_loc: ErrorBundle.SourceLocation = .{
@@ -817,7 +823,7 @@ fn diagnosticsToErrorBundle(
try flushErrorMessageIntoBundle(&bundle, err, cur_notes.items);
}
cur_err = .{
- .msg = try bundle.addString(msg_buf.items),
+ .msg = try bundle.addString(msg_buf.written()),
.src_loc = src_loc,
};
cur_notes.clearRetainingCapacity();
@@ -825,7 +831,7 @@ fn diagnosticsToErrorBundle(
.note => {
cur_err.?.notes_len += 1;
try cur_notes.append(gpa, .{
- .msg = try bundle.addString(msg_buf.items),
+ .msg = try bundle.addString(msg_buf.written()),
.src_loc = src_loc,
});
},
@@ -876,7 +882,7 @@ fn aroDiagnosticsToErrorBundle(
var msg_writer = MsgWriter.init(gpa);
defer msg_writer.deinit();
var cur_err: ?ErrorBundle.ErrorMessage = null;
- var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
+ var cur_notes: std.ArrayList(ErrorBundle.ErrorMessage) = .empty;
defer cur_notes.deinit(gpa);
for (comp.diagnostics.list.items) |msg| {
switch (msg.kind) {
@@ -971,11 +977,11 @@ const MsgWriter = struct {
}
pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void {
- m.buf.writer().print(fmt, args) catch {};
+ m.buf.print(fmt, args) catch {};
}
pub fn write(m: *MsgWriter, msg: []const u8) void {
- m.buf.writer().writeAll(msg) catch {};
+ m.buf.appendSlice(msg) catch {};
}
pub fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
diff --git a/lib/compiler/resinator/parse.zig b/lib/compiler/resinator/parse.zig
index 285b5da843..cc3bc4ae3c 100644
--- a/lib/compiler/resinator/parse.zig
+++ b/lib/compiler/resinator/parse.zig
@@ -82,8 +82,8 @@ pub const Parser = struct {
}
fn parseRoot(self: *Self) Error!*Node {
- var statements = std.array_list.Managed(*Node).init(self.state.allocator);
- defer statements.deinit();
+ var statements: std.ArrayList(*Node) = .empty;
+ defer statements.deinit(self.state.allocator);
try self.parseStatements(&statements);
try self.check(.eof);
@@ -95,7 +95,7 @@ pub const Parser = struct {
return &node.base;
}
- fn parseStatements(self: *Self, statements: *std.array_list.Managed(*Node)) Error!void {
+ fn parseStatements(self: *Self, statements: *std.ArrayList(*Node)) Error!void {
while (true) {
try self.nextToken(.whitespace_delimiter_only);
if (self.state.token.id == .eof) break;
@@ -105,7 +105,7 @@ pub const Parser = struct {
// (usually it will end up with bogus things like 'file
// not found: {')
const statement = try self.parseStatement();
- try statements.append(statement);
+ try statements.append(self.state.allocator, statement);
}
}
@@ -115,7 +115,7 @@ pub const Parser = struct {
/// current token is unchanged.
/// The returned slice is allocated by the parser's arena
fn parseCommonResourceAttributes(self: *Self) ![]Token {
- var common_resource_attributes: std.ArrayListUnmanaged(Token) = .empty;
+ var common_resource_attributes: std.ArrayList(Token) = .empty;
while (true) {
const maybe_common_resource_attribute = try self.lookaheadToken(.normal);
if (maybe_common_resource_attribute.id == .literal and rc.CommonResourceAttributes.map.has(maybe_common_resource_attribute.slice(self.lexer.buffer))) {
@@ -135,7 +135,7 @@ pub const Parser = struct {
/// current token is unchanged.
/// The returned slice is allocated by the parser's arena
fn parseOptionalStatements(self: *Self, resource: ResourceType) ![]*Node {
- var optional_statements: std.ArrayListUnmanaged(*Node) = .empty;
+ var optional_statements: std.ArrayList(*Node) = .empty;
const num_statement_types = @typeInfo(rc.OptionalStatements).@"enum".fields.len;
var statement_type_has_duplicates = [_]bool{false} ** num_statement_types;
@@ -355,8 +355,8 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var strings = std.array_list.Managed(*Node).init(self.state.allocator);
- defer strings.deinit();
+ var strings: std.ArrayList(*Node) = .empty;
+ defer strings.deinit(self.state.allocator);
while (true) {
const maybe_end_token = try self.lookaheadToken(.normal);
switch (maybe_end_token.id) {
@@ -392,7 +392,7 @@ pub const Parser = struct {
.maybe_comma = comma_token,
.string = self.state.token,
};
- try strings.append(&string_node.base);
+ try strings.append(self.state.allocator, &string_node.base);
}
if (strings.items.len == 0) {
@@ -501,7 +501,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var accelerators: std.ArrayListUnmanaged(*Node) = .empty;
+ var accelerators: std.ArrayList(*Node) = .empty;
while (true) {
const lookahead = try self.lookaheadToken(.normal);
@@ -519,7 +519,7 @@ pub const Parser = struct {
const idvalue = try self.parseExpression(.{ .allowed_types = .{ .number = true } });
- var type_and_options: std.ArrayListUnmanaged(Token) = .empty;
+ var type_and_options: std.ArrayList(Token) = .empty;
while (true) {
if (!(try self.parseOptionalToken(.comma))) break;
@@ -584,7 +584,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var controls: std.ArrayListUnmanaged(*Node) = .empty;
+ var controls: std.ArrayList(*Node) = .empty;
defer controls.deinit(self.state.allocator);
while (try self.parseControlStatement(resource)) |control_node| {
// The number of controls must fit in a u16 in order for it to
@@ -643,7 +643,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var buttons: std.ArrayListUnmanaged(*Node) = .empty;
+ var buttons: std.ArrayList(*Node) = .empty;
defer buttons.deinit(self.state.allocator);
while (try self.parseToolbarButtonStatement()) |button_node| {
// The number of buttons must fit in a u16 in order for it to
@@ -701,7 +701,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var items: std.ArrayListUnmanaged(*Node) = .empty;
+ var items: std.ArrayList(*Node) = .empty;
defer items.deinit(self.state.allocator);
while (try self.parseMenuItemStatement(resource, id_token, 1)) |item_node| {
try items.append(self.state.allocator, item_node);
@@ -735,7 +735,7 @@ pub const Parser = struct {
// common resource attributes must all be contiguous and come before optional-statements
const common_resource_attributes = try self.parseCommonResourceAttributes();
- var fixed_info: std.ArrayListUnmanaged(*Node) = .empty;
+ var fixed_info: std.ArrayList(*Node) = .empty;
while (try self.parseVersionStatement()) |version_statement| {
try fixed_info.append(self.state.arena, version_statement);
}
@@ -744,7 +744,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var block_statements: std.ArrayListUnmanaged(*Node) = .empty;
+ var block_statements: std.ArrayList(*Node) = .empty;
while (try self.parseVersionBlockOrValue(id_token, 1)) |block_node| {
try block_statements.append(self.state.arena, block_node);
}
@@ -852,8 +852,8 @@ pub const Parser = struct {
/// Expects the current token to be a begin token.
/// After return, the current token will be the end token.
fn parseRawDataBlock(self: *Self) Error![]*Node {
- var raw_data = std.array_list.Managed(*Node).init(self.state.allocator);
- defer raw_data.deinit();
+ var raw_data: std.ArrayList(*Node) = .empty;
+ defer raw_data.deinit(self.state.allocator);
while (true) {
const maybe_end_token = try self.lookaheadToken(.normal);
switch (maybe_end_token.id) {
@@ -888,7 +888,7 @@ pub const Parser = struct {
else => {},
}
const expression = try self.parseExpression(.{ .allowed_types = .{ .number = true, .string = true } });
- try raw_data.append(expression);
+ try raw_data.append(self.state.allocator, expression);
if (expression.isNumberExpression()) {
const maybe_close_paren = try self.lookaheadToken(.normal);
@@ -1125,7 +1125,7 @@ pub const Parser = struct {
_ = try self.parseOptionalToken(.comma);
- var options: std.ArrayListUnmanaged(Token) = .empty;
+ var options: std.ArrayList(Token) = .empty;
while (true) {
const option_token = try self.lookaheadToken(.normal);
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
@@ -1160,7 +1160,7 @@ pub const Parser = struct {
}
try self.skipAnyCommas();
- var options: std.ArrayListUnmanaged(Token) = .empty;
+ var options: std.ArrayList(Token) = .empty;
while (true) {
const option_token = try self.lookaheadToken(.normal);
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
@@ -1175,7 +1175,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var items: std.ArrayListUnmanaged(*Node) = .empty;
+ var items: std.ArrayList(*Node) = .empty;
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
try items.append(self.state.arena, item_node);
}
@@ -1245,7 +1245,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var items: std.ArrayListUnmanaged(*Node) = .empty;
+ var items: std.ArrayList(*Node) = .empty;
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
try items.append(self.state.arena, item_node);
}
@@ -1322,7 +1322,7 @@ pub const Parser = struct {
switch (statement_type) {
.file_version, .product_version => {
var parts_buffer: [4]*Node = undefined;
- var parts = std.ArrayListUnmanaged(*Node).initBuffer(&parts_buffer);
+ var parts = std.ArrayList(*Node).initBuffer(&parts_buffer);
while (true) {
const value = try self.parseExpression(.{ .allowed_types = .{ .number = true } });
@@ -1402,7 +1402,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var children: std.ArrayListUnmanaged(*Node) = .empty;
+ var children: std.ArrayList(*Node) = .empty;
while (try self.parseVersionBlockOrValue(top_level_version_id_token, nesting_level + 1)) |value_node| {
try children.append(self.state.arena, value_node);
}
@@ -1435,7 +1435,7 @@ pub const Parser = struct {
}
fn parseBlockValuesList(self: *Self, had_comma_before_first_value: bool) Error![]*Node {
- var values: std.ArrayListUnmanaged(*Node) = .empty;
+ var values: std.ArrayList(*Node) = .empty;
var seen_number: bool = false;
var first_string_value: ?*Node = null;
while (true) {
diff --git a/lib/compiler/resinator/preprocess.zig b/lib/compiler/resinator/preprocess.zig
index ff6de00461..d14812a05d 100644
--- a/lib/compiler/resinator/preprocess.zig
+++ b/lib/compiler/resinator/preprocess.zig
@@ -2,28 +2,32 @@ const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const cli = @import("cli.zig");
+const Dependencies = @import("compile.zig").Dependencies;
const aro = @import("aro");
const PreprocessError = error{ ArgError, GeneratedSourceError, PreprocessError, StreamTooLong, OutOfMemory };
pub fn preprocess(
comp: *aro.Compilation,
- writer: anytype,
+ writer: *std.Io.Writer,
/// Expects argv[0] to be the command name
argv: []const []const u8,
- maybe_dependencies_list: ?*std.array_list.Managed([]const u8),
+ maybe_dependencies: ?*Dependencies,
) PreprocessError!void {
try comp.addDefaultPragmaHandlers();
var driver: aro.Driver = .{ .comp = comp, .aro_name = "arocc" };
defer driver.deinit();
- var macro_buf = std.array_list.Managed(u8).init(comp.gpa);
+ var macro_buf: std.Io.Writer.Allocating = .init(comp.gpa);
defer macro_buf.deinit();
- _ = driver.parseArgs(std.io.null_writer, macro_buf.writer(), argv) catch |err| switch (err) {
+ var trash: [64]u8 = undefined;
+ var discarding: std.Io.Writer.Discarding = .init(&trash);
+ _ = driver.parseArgs(&discarding.writer, &macro_buf.writer, argv) catch |err| switch (err) {
error.FatalError => return error.ArgError,
error.OutOfMemory => |e| return e,
+ error.WriteFailed => return error.OutOfMemory,
};
if (hasAnyErrors(comp)) return error.ArgError;
@@ -33,7 +37,7 @@ pub fn preprocess(
error.FatalError => return error.GeneratedSourceError,
else => |e| return e,
};
- const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.items) catch |err| switch (err) {
+ const user_macros = comp.addSourceFromBuffer("<command line>", macro_buf.written()) catch |err| switch (err) {
error.FatalError => return error.GeneratedSourceError,
else => |e| return e,
};
@@ -59,15 +63,17 @@ pub fn preprocess(
if (hasAnyErrors(comp)) return error.PreprocessError;
- try pp.prettyPrintTokens(writer, .result_only);
+ pp.prettyPrintTokens(writer, .result_only) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
- if (maybe_dependencies_list) |dependencies_list| {
+ if (maybe_dependencies) |dependencies| {
for (comp.sources.values()) |comp_source| {
if (comp_source.id == builtin_macros.id or comp_source.id == user_macros.id) continue;
if (comp_source.id == .unused or comp_source.id == .generated) continue;
- const duped_path = try dependencies_list.allocator.dupe(u8, comp_source.path);
- errdefer dependencies_list.allocator.free(duped_path);
- try dependencies_list.append(duped_path);
+ const duped_path = try dependencies.allocator.dupe(u8, comp_source.path);
+ errdefer dependencies.allocator.free(duped_path);
+ try dependencies.list.append(dependencies.allocator, duped_path);
}
}
}
@@ -87,8 +93,8 @@ fn hasAnyErrors(comp: *aro.Compilation) bool {
/// `arena` is used for temporary -D argument strings and the INCLUDE environment variable.
/// The arena should be kept alive at least as long as `argv`.
-pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
- try argv.appendSlice(&.{
+pub fn appendAroArgs(arena: Allocator, argv: *std.ArrayList([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
+ try argv.appendSlice(arena, &.{
"-E",
"--comments",
"-fuse-line-directives",
@@ -99,13 +105,13 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
"-D_WIN32", // undocumented, but defined by default
});
for (options.extra_include_paths.items) |extra_include_path| {
- try argv.append("-I");
- try argv.append(extra_include_path);
+ try argv.append(arena, "-I");
+ try argv.append(arena, extra_include_path);
}
for (system_include_paths) |include_path| {
- try argv.append("-isystem");
- try argv.append(include_path);
+ try argv.append(arena, "-isystem");
+ try argv.append(arena, include_path);
}
if (!options.ignore_include_env_var) {
@@ -119,8 +125,8 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
};
var it = std.mem.tokenizeScalar(u8, INCLUDE, delimiter);
while (it.next()) |include_path| {
- try argv.append("-isystem");
- try argv.append(include_path);
+ try argv.append(arena, "-isystem");
+ try argv.append(arena, include_path);
}
}
@@ -128,13 +134,13 @@ pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8)
while (symbol_it.next()) |entry| {
switch (entry.value_ptr.*) {
.define => |value| {
- try argv.append("-D");
+ try argv.append(arena, "-D");
const define_arg = try std.fmt.allocPrint(arena, "{s}={s}", .{ entry.key_ptr.*, value });
- try argv.append(define_arg);
+ try argv.append(arena, define_arg);
},
.undefine => {
- try argv.append("-U");
- try argv.append(entry.key_ptr.*);
+ try argv.append(arena, "-U");
+ try argv.append(arena, entry.key_ptr.*);
},
}
}
diff --git a/lib/compiler/resinator/res.zig b/lib/compiler/resinator/res.zig
index ba5b203451..9d8347afff 100644
--- a/lib/compiler/resinator/res.zig
+++ b/lib/compiler/resinator/res.zig
@@ -258,7 +258,7 @@ pub const NameOrOrdinal = union(enum) {
}
}
- pub fn write(self: NameOrOrdinal, writer: anytype) !void {
+ pub fn write(self: NameOrOrdinal, writer: *std.Io.Writer) !void {
switch (self) {
.name => |name| {
try writer.writeAll(std.mem.sliceAsBytes(name[0 .. name.len + 1]));
@@ -270,7 +270,7 @@ pub const NameOrOrdinal = union(enum) {
}
}
- pub fn writeEmpty(writer: anytype) !void {
+ pub fn writeEmpty(writer: *std.Io.Writer) !void {
try writer.writeInt(u16, 0, .little);
}
@@ -283,8 +283,8 @@ pub const NameOrOrdinal = union(enum) {
pub fn nameFromString(allocator: Allocator, bytes: SourceBytes) !NameOrOrdinal {
// Names have a limit of 256 UTF-16 code units + null terminator
- var buf = try std.array_list.Managed(u16).initCapacity(allocator, @min(257, bytes.slice.len));
- errdefer buf.deinit();
+ var buf = try std.ArrayList(u16).initCapacity(allocator, @min(257, bytes.slice.len));
+ errdefer buf.deinit(allocator);
var i: usize = 0;
while (bytes.code_page.codepointAt(i, bytes.slice)) |codepoint| : (i += codepoint.byte_len) {
@@ -292,27 +292,27 @@ pub const NameOrOrdinal = union(enum) {
const c = codepoint.value;
if (c == Codepoint.invalid) {
- try buf.append(std.mem.nativeToLittle(u16, '�'));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, '�'));
} else if (c < 0x7F) {
// ASCII chars in names are always converted to uppercase
- try buf.append(std.mem.nativeToLittle(u16, std.ascii.toUpper(@intCast(c))));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, std.ascii.toUpper(@intCast(c))));
} else if (c < 0x10000) {
const short: u16 = @intCast(c);
- try buf.append(std.mem.nativeToLittle(u16, short));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, short));
} else {
const high = @as(u16, @intCast((c - 0x10000) >> 10)) + 0xD800;
- try buf.append(std.mem.nativeToLittle(u16, high));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, high));
// Note: This can cut-off in the middle of a UTF-16 surrogate pair,
// i.e. it can make the string end with an unpaired high surrogate
if (buf.items.len == 256) break;
const low = @as(u16, @intCast(c & 0x3FF)) + 0xDC00;
- try buf.append(std.mem.nativeToLittle(u16, low));
+ try buf.append(allocator, std.mem.nativeToLittle(u16, low));
}
}
- return NameOrOrdinal{ .name = try buf.toOwnedSliceSentinel(0) };
+ return NameOrOrdinal{ .name = try buf.toOwnedSliceSentinel(allocator, 0) };
}
/// Returns `null` if the bytes do not form a valid number.
@@ -1079,7 +1079,7 @@ pub const FixedFileInfo = struct {
}
};
- pub fn write(self: FixedFileInfo, writer: anytype) !void {
+ pub fn write(self: FixedFileInfo, writer: *std.Io.Writer) !void {
try writer.writeInt(u32, signature, .little);
try writer.writeInt(u32, version, .little);
try writer.writeInt(u32, self.file_version.mostSignificantCombinedParts(), .little);
diff --git a/lib/compiler/resinator/source_mapping.zig b/lib/compiler/resinator/source_mapping.zig
index 4caaf38c84..928a7205c6 100644
--- a/lib/compiler/resinator/source_mapping.zig
+++ b/lib/compiler/resinator/source_mapping.zig
@@ -10,7 +10,7 @@ pub const ParseLineCommandsResult = struct {
const CurrentMapping = struct {
line_num: usize = 1,
- filename: std.ArrayListUnmanaged(u8) = .empty,
+ filename: std.ArrayList(u8) = .empty,
pending: bool = true,
ignore_contents: bool = false,
};
@@ -574,8 +574,8 @@ fn parseFilename(allocator: Allocator, str: []const u8) error{ OutOfMemory, Inva
escape_u,
};
- var filename = try std.array_list.Managed(u8).initCapacity(allocator, str.len);
- errdefer filename.deinit();
+ var filename = try std.ArrayList(u8).initCapacity(allocator, str.len);
+ errdefer filename.deinit(allocator);
var state: State = .string;
var index: usize = 0;
var escape_len: usize = undefined;
@@ -693,7 +693,7 @@ fn parseFilename(allocator: Allocator, str: []const u8) error{ OutOfMemory, Inva
}
}
- return filename.toOwnedSlice();
+ return filename.toOwnedSlice(allocator);
}
fn testParseFilename(expected: []const u8, input: []const u8) !void {
@@ -927,7 +927,7 @@ test "SourceMappings collapse" {
/// Same thing as StringTable in Zig's src/Wasm.zig
pub const StringTable = struct {
- data: std.ArrayListUnmanaged(u8) = .empty,
+ data: std.ArrayList(u8) = .empty,
map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .empty,
pub fn deinit(self: *StringTable, allocator: Allocator) void {
diff --git a/lib/compiler/resinator/windows1252.zig b/lib/compiler/resinator/windows1252.zig
index e88687bac0..d6a38a4890 100644
--- a/lib/compiler/resinator/windows1252.zig
+++ b/lib/compiler/resinator/windows1252.zig
@@ -1,36 +1,5 @@
const std = @import("std");
-pub fn windows1252ToUtf8Stream(writer: anytype, reader: anytype) !usize {
- var bytes_written: usize = 0;
- var utf8_buf: [3]u8 = undefined;
- while (true) {
- const c = reader.readByte() catch |err| switch (err) {
- error.EndOfStream => return bytes_written,
- else => |e| return e,
- };
- const codepoint = toCodepoint(c);
- if (codepoint <= 0x7F) {
- try writer.writeByte(c);
- bytes_written += 1;
- } else {
- const utf8_len = std.unicode.utf8Encode(codepoint, &utf8_buf) catch unreachable;
- try writer.writeAll(utf8_buf[0..utf8_len]);
- bytes_written += utf8_len;
- }
- }
-}
-
-/// Returns the number of code units written to the writer
-pub fn windows1252ToUtf16AllocZ(allocator: std.mem.Allocator, win1252_str: []const u8) ![:0]u16 {
- // Guaranteed to need exactly the same number of code units as Windows-1252 bytes
- var utf16_slice = try allocator.allocSentinel(u16, win1252_str.len, 0);
- errdefer allocator.free(utf16_slice);
- for (win1252_str, 0..) |c, i| {
- utf16_slice[i] = toCodepoint(c);
- }
- return utf16_slice;
-}
-
/// https://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WindowsBestFit/bestfit1252.txt
pub fn toCodepoint(c: u8) u16 {
return switch (c) {
@@ -572,17 +541,3 @@ pub fn bestFitFromCodepoint(codepoint: u21) ?u8 {
else => null,
};
}
-
-test "windows-1252 to utf8" {
- var buf = std.array_list.Managed(u8).init(std.testing.allocator);
- defer buf.deinit();
-
- const input_windows1252 = "\x81pqrstuvwxyz{|}~\x80\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8e\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9e\x9f\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
- const expected_utf8 = "\xc2\x81pqrstuvwxyz{|}~€‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ";
-
- var fbs = std.io.fixedBufferStream(input_windows1252);
- const bytes_written = try windows1252ToUtf8Stream(buf.writer(), fbs.reader());
-
- try std.testing.expectEqualStrings(expected_utf8, buf.items);
- try std.testing.expectEqual(expected_utf8.len, bytes_written);
-}
diff --git a/lib/docs/wasm/Decl.zig b/lib/docs/wasm/Decl.zig
index ba1a7b455a..79a55294d2 100644
--- a/lib/docs/wasm/Decl.zig
+++ b/lib/docs/wasm/Decl.zig
@@ -6,6 +6,7 @@ const gpa = std.heap.wasm_allocator;
const assert = std.debug.assert;
const log = std.log;
const Oom = error{OutOfMemory};
+const ArrayList = std.ArrayList;
ast_node: Ast.Node.Index,
file: Walk.File.Index,
@@ -189,7 +190,7 @@ pub fn lookup(decl: *const Decl, name: []const u8) ?Decl.Index {
}
/// Appends the fully qualified name to `out`.
-pub fn fqn(decl: *const Decl, out: *std.ArrayListUnmanaged(u8)) Oom!void {
+pub fn fqn(decl: *const Decl, out: *ArrayList(u8)) Oom!void {
try decl.append_path(out);
if (decl.parent != .none) {
try append_parent_ns(out, decl.parent);
@@ -199,12 +200,12 @@ pub fn fqn(decl: *const Decl, out: *std.ArrayListUnmanaged(u8)) Oom!void {
}
}
-pub fn reset_with_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!void {
+pub fn reset_with_path(decl: *const Decl, list: *ArrayList(u8)) Oom!void {
list.clearRetainingCapacity();
try append_path(decl, list);
}
-pub fn append_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!void {
+pub fn append_path(decl: *const Decl, list: *ArrayList(u8)) Oom!void {
const start = list.items.len;
// Prefer the module name alias.
for (Walk.modules.keys(), Walk.modules.values()) |pkg_name, pkg_file| {
@@ -230,7 +231,7 @@ pub fn append_path(decl: *const Decl, list: *std.ArrayListUnmanaged(u8)) Oom!voi
}
}
-pub fn append_parent_ns(list: *std.ArrayListUnmanaged(u8), parent: Decl.Index) Oom!void {
+pub fn append_parent_ns(list: *ArrayList(u8), parent: Decl.Index) Oom!void {
assert(parent != .none);
const decl = parent.get();
if (decl.parent != .none) {
diff --git a/lib/docs/wasm/html_render.zig b/lib/docs/wasm/html_render.zig
index 68b10a01e4..13d2ec05c5 100644
--- a/lib/docs/wasm/html_render.zig
+++ b/lib/docs/wasm/html_render.zig
@@ -1,6 +1,8 @@
const std = @import("std");
const Ast = std.zig.Ast;
const assert = std.debug.assert;
+const ArrayList = std.ArrayList;
+const Writer = std.Io.Writer;
const Walk = @import("Walk");
const Decl = Walk.Decl;
@@ -30,7 +32,7 @@ pub const Annotation = struct {
pub fn fileSourceHtml(
file_index: Walk.File.Index,
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
root_node: Ast.Node.Index,
options: RenderSourceOptions,
) !void {
@@ -38,7 +40,7 @@ pub fn fileSourceHtml(
const file = file_index.get();
const g = struct {
- var field_access_buffer: std.ArrayListUnmanaged(u8) = .empty;
+ var field_access_buffer: ArrayList(u8) = .empty;
};
const start_token = ast.firstToken(root_node);
@@ -88,7 +90,7 @@ pub fn fileSourceHtml(
if (next_annotate_index >= options.source_location_annotations.len) break;
const next_annotation = options.source_location_annotations[next_annotate_index];
if (cursor <= next_annotation.file_byte_offset) break;
- try out.writer(gpa).print("<span id=\"{s}{d}\"></span>", .{
+ try out.print(gpa, "<span id=\"{s}{d}\"></span>", .{
options.annotation_prefix, next_annotation.dom_id,
});
next_annotate_index += 1;
@@ -318,7 +320,7 @@ pub fn fileSourceHtml(
}
}
-fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usize) !void {
+fn appendUnindented(out: *ArrayList(u8), s: []const u8, indent: usize) !void {
var it = std.mem.splitScalar(u8, s, '\n');
var is_first_line = true;
while (it.next()) |line| {
@@ -332,7 +334,7 @@ fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usi
}
}
-pub fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
+pub fn appendEscaped(out: *ArrayList(u8), s: []const u8) !void {
for (s) |c| {
try out.ensureUnusedCapacity(gpa, 6);
switch (c) {
@@ -347,7 +349,7 @@ pub fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
fn walkFieldAccesses(
file_index: Walk.File.Index,
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
node: Ast.Node.Index,
) Oom!void {
const ast = file_index.get_ast();
@@ -371,7 +373,7 @@ fn walkFieldAccesses(
fn resolveIdentLink(
file_index: Walk.File.Index,
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
ident_token: Ast.TokenIndex,
) Oom!void {
const decl_index = file_index.get().lookup_token(ident_token);
@@ -391,7 +393,7 @@ fn unindent(s: []const u8, indent: usize) []const u8 {
return s[indent_idx..];
}
-pub fn resolveDeclLink(decl_index: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
+pub fn resolveDeclLink(decl_index: Decl.Index, out: *ArrayList(u8)) Oom!void {
const decl = decl_index.get();
switch (decl.categorize()) {
.alias => |alias_decl| try alias_decl.get().fqn(out),
diff --git a/lib/docs/wasm/main.zig b/lib/docs/wasm/main.zig
index d3043cd917..adf3f6b884 100644
--- a/lib/docs/wasm/main.zig
+++ b/lib/docs/wasm/main.zig
@@ -5,6 +5,8 @@ const Ast = std.zig.Ast;
const Walk = @import("Walk");
const markdown = @import("markdown.zig");
const Decl = Walk.Decl;
+const ArrayList = std.ArrayList;
+const Writer = std.Io.Writer;
const fileSourceHtml = @import("html_render.zig").fileSourceHtml;
const appendEscaped = @import("html_render.zig").appendEscaped;
@@ -66,8 +68,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
};
}
-var query_string: std.ArrayListUnmanaged(u8) = .empty;
-var query_results: std.ArrayListUnmanaged(Decl.Index) = .empty;
+var query_string: ArrayList(u8) = .empty;
+var query_results: ArrayList(Decl.Index) = .empty;
/// Resizes the query string to be the correct length; returns the pointer to
/// the query string.
@@ -99,11 +101,11 @@ fn query_exec_fallible(query: []const u8, ignore_case: bool) !void {
segments: u16,
};
const g = struct {
- var full_path_search_text: std.ArrayListUnmanaged(u8) = .empty;
- var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .empty;
- var doc_search_text: std.ArrayListUnmanaged(u8) = .empty;
+ var full_path_search_text: ArrayList(u8) = .empty;
+ var full_path_search_text_lower: ArrayList(u8) = .empty;
+ var doc_search_text: ArrayList(u8) = .empty;
/// Each element matches a corresponding query_results element.
- var scores: std.ArrayListUnmanaged(Score) = .empty;
+ var scores: ArrayList(Score) = .empty;
};
// First element stores the size of the list.
@@ -234,7 +236,7 @@ const ErrorIdentifier = packed struct(u64) {
return ast.tokenTag(token_index - 1) == .doc_comment;
}
- fn html(ei: ErrorIdentifier, base_decl: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
+ fn html(ei: ErrorIdentifier, base_decl: Decl.Index, out: *ArrayList(u8)) Oom!void {
const decl_index = ei.decl_index;
const ast = decl_index.get().file.get_ast();
const name = ast.tokenSlice(ei.token_index);
@@ -260,7 +262,7 @@ const ErrorIdentifier = packed struct(u64) {
}
};
-var string_result: std.ArrayListUnmanaged(u8) = .empty;
+var string_result: ArrayList(u8) = .empty;
var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .empty;
export fn decl_error_set(decl_index: Decl.Index) Slice(ErrorIdentifier) {
@@ -411,7 +413,7 @@ fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
fn ast_decl_fields_fallible(ast: *Ast, ast_index: Ast.Node.Index) ![]Ast.Node.Index {
const g = struct {
- var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
+ var result: ArrayList(Ast.Node.Index) = .empty;
};
g.result.clearRetainingCapacity();
var buf: [2]Ast.Node.Index = undefined;
@@ -429,7 +431,7 @@ fn ast_decl_fields_fallible(ast: *Ast, ast_index: Ast.Node.Index) ![]Ast.Node.In
fn decl_params_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
const g = struct {
- var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
+ var result: ArrayList(Ast.Node.Index) = .empty;
};
g.result.clearRetainingCapacity();
const decl = decl_index.get();
@@ -460,7 +462,7 @@ export fn decl_param_html(decl_index: Decl.Index, param_node: Ast.Node.Index) St
}
fn decl_field_html_fallible(
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
decl_index: Decl.Index,
field_node: Ast.Node.Index,
) !void {
@@ -480,7 +482,7 @@ fn decl_field_html_fallible(
}
fn decl_param_html_fallible(
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
decl_index: Decl.Index,
param_node: Ast.Node.Index,
) !void {
@@ -649,7 +651,7 @@ export fn decl_docs_html(decl_index: Decl.Index, short: bool) String {
}
fn collect_docs(
- list: *std.ArrayListUnmanaged(u8),
+ list: *ArrayList(u8),
ast: *const Ast,
first_doc_comment: Ast.TokenIndex,
) Oom!void {
@@ -667,7 +669,7 @@ fn collect_docs(
}
fn render_docs(
- out: *std.ArrayListUnmanaged(u8),
+ out: *ArrayList(u8),
decl_index: Decl.Index,
first_doc_comment: Ast.TokenIndex,
short: bool,
@@ -691,11 +693,10 @@ fn render_docs(
defer parsed_doc.deinit(gpa);
const g = struct {
- var link_buffer: std.ArrayListUnmanaged(u8) = .empty;
+ var link_buffer: ArrayList(u8) = .empty;
};
- const Writer = std.ArrayListUnmanaged(u8).Writer;
- const Renderer = markdown.Renderer(Writer, Decl.Index);
+ const Renderer = markdown.Renderer(Decl.Index);
const renderer: Renderer = .{
.context = decl_index,
.renderFn = struct {
@@ -703,8 +704,8 @@ fn render_docs(
r: Renderer,
doc: markdown.Document,
node: markdown.Document.Node.Index,
- writer: Writer,
- ) !void {
+ writer: *Writer,
+ ) Writer.Error!void {
const data = doc.nodes.items(.data)[@intFromEnum(node)];
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
.code_span => {
@@ -712,7 +713,7 @@ fn render_docs(
const content = doc.string(data.text.content);
if (resolve_decl_path(r.context, content)) |resolved_decl_index| {
g.link_buffer.clearRetainingCapacity();
- try resolveDeclLink(resolved_decl_index, &g.link_buffer);
+ resolveDeclLink(resolved_decl_index, &g.link_buffer) catch return error.WriteFailed;
try writer.writeAll("<a href=\"#");
_ = missing_feature_url_escape;
@@ -730,7 +731,12 @@ fn render_docs(
}
}.render,
};
- try renderer.render(parsed_doc, out.writer(gpa));
+
+ var allocating = Writer.Allocating.fromArrayList(gpa, out);
+ defer out.* = allocating.toArrayList();
+ renderer.render(parsed_doc, &allocating.writer) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
}
fn resolve_decl_path(decl_index: Decl.Index, path: []const u8) ?Decl.Index {
@@ -827,7 +833,7 @@ export fn find_module_root(pkg: Walk.ModuleIndex) Decl.Index {
}
/// Set by `set_input_string`.
-var input_string: std.ArrayListUnmanaged(u8) = .empty;
+var input_string: ArrayList(u8) = .empty;
export fn set_input_string(len: usize) [*]u8 {
input_string.resize(gpa, len) catch @panic("OOM");
@@ -849,7 +855,7 @@ export fn find_decl() Decl.Index {
if (result != .none) return result;
const g = struct {
- var match_fqn: std.ArrayListUnmanaged(u8) = .empty;
+ var match_fqn: ArrayList(u8) = .empty;
};
for (Walk.decls.items, 0..) |*decl, decl_index| {
g.match_fqn.clearRetainingCapacity();
@@ -905,7 +911,7 @@ export fn type_fn_members(parent: Decl.Index, include_private: bool) Slice(Decl.
export fn namespace_members(parent: Decl.Index, include_private: bool) Slice(Decl.Index) {
const g = struct {
- var members: std.ArrayListUnmanaged(Decl.Index) = .empty;
+ var members: ArrayList(Decl.Index) = .empty;
};
g.members.clearRetainingCapacity();
diff --git a/lib/docs/wasm/markdown/renderer.zig b/lib/docs/wasm/markdown/renderer.zig
index cba857d204..13e3795b9a 100644
--- a/lib/docs/wasm/markdown/renderer.zig
+++ b/lib/docs/wasm/markdown/renderer.zig
@@ -2,25 +2,26 @@ const std = @import("std");
const Document = @import("Document.zig");
const Node = Document.Node;
const assert = std.debug.assert;
+const Writer = std.Io.Writer;
/// A Markdown document renderer.
///
/// Each concrete `Renderer` type has a `renderDefault` function, with the
/// intention that custom `renderFn` implementations can call `renderDefault`
/// for node types for which they require no special rendering.
-pub fn Renderer(comptime Writer: type, comptime Context: type) type {
+pub fn Renderer(comptime Context: type) type {
return struct {
renderFn: *const fn (
r: Self,
doc: Document,
node: Node.Index,
- writer: Writer,
+ writer: *Writer,
) Writer.Error!void = renderDefault,
context: Context,
const Self = @This();
- pub fn render(r: Self, doc: Document, writer: Writer) Writer.Error!void {
+ pub fn render(r: Self, doc: Document, writer: *Writer) Writer.Error!void {
try r.renderFn(r, doc, .root, writer);
}
@@ -28,7 +29,7 @@ pub fn Renderer(comptime Writer: type, comptime Context: type) type {
r: Self,
doc: Document,
node: Node.Index,
- writer: Writer,
+ writer: *Writer,
) Writer.Error!void {
const data = doc.nodes.items(.data)[@intFromEnum(node)];
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
@@ -188,8 +189,8 @@ pub fn Renderer(comptime Writer: type, comptime Context: type) type {
pub fn renderInlineNodeText(
doc: Document,
node: Node.Index,
- writer: anytype,
-) @TypeOf(writer).Error!void {
+ writer: *Writer,
+) Writer.Error!void {
const data = doc.nodes.items(.data)[@intFromEnum(node)];
switch (doc.nodes.items(.tag)[@intFromEnum(node)]) {
.root,
@@ -234,14 +235,12 @@ pub fn fmtHtml(bytes: []const u8) std.fmt.Formatter([]const u8, formatHtml) {
return .{ .data = bytes };
}
-fn formatHtml(bytes: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
- for (bytes) |b| {
- switch (b) {
- '<' => try writer.writeAll("&lt;"),
- '>' => try writer.writeAll("&gt;"),
- '&' => try writer.writeAll("&amp;"),
- '"' => try writer.writeAll("&quot;"),
- else => try writer.writeByte(b),
- }
- }
+fn formatHtml(bytes: []const u8, w: *Writer) Writer.Error!void {
+ for (bytes) |b| switch (b) {
+ '<' => try w.writeAll("&lt;"),
+ '>' => try w.writeAll("&gt;"),
+ '&' => try w.writeAll("&amp;"),
+ '"' => try w.writeAll("&quot;"),
+ else => try w.writeByte(b),
+ };
}
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index 6bbe3307a0..1e321de50a 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -257,7 +257,7 @@ const Check = struct {
fn dumpSection(allocator: Allocator, name: [:0]const u8) Check {
var check = Check.create(allocator, .dump_section);
const off: u32 = @intCast(check.data.items.len);
- check.data.writer().print("{s}\x00", .{name}) catch @panic("OOM");
+ check.data.print("{s}\x00", .{name}) catch @panic("OOM");
check.payload = .{ .dump_section = off };
return check;
}
@@ -1320,7 +1320,8 @@ const MachODumper = struct {
}
bindings.deinit();
}
- try ctx.parseBindInfo(data, &bindings);
+ var data_reader: std.Io.Reader = .fixed(data);
+ try ctx.parseBindInfo(&data_reader, &bindings);
mem.sort(Binding, bindings.items, {}, Binding.lessThan);
for (bindings.items) |binding| {
try writer.print("0x{x} [addend: {d}]", .{ binding.address, binding.addend });
@@ -1335,11 +1336,7 @@ const MachODumper = struct {
}
}
- fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.array_list.Managed(Binding)) !void {
- var stream = std.io.fixedBufferStream(data);
- var creader = std.io.countingReader(stream.reader());
- const reader = creader.reader();
-
+ fn parseBindInfo(ctx: ObjectContext, reader: *std.Io.Reader, bindings: *std.array_list.Managed(Binding)) !void {
var seg_id: ?u8 = null;
var tag: Binding.Tag = .self;
var ordinal: u16 = 0;
@@ -1350,7 +1347,7 @@ const MachODumper = struct {
defer name_buf.deinit();
while (true) {
- const byte = reader.readByte() catch break;
+ const byte = reader.takeByte() catch break;
const opc = byte & macho.BIND_OPCODE_MASK;
const imm = byte & macho.BIND_IMMEDIATE_MASK;
switch (opc) {
@@ -1371,18 +1368,17 @@ const MachODumper = struct {
},
macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => {
seg_id = imm;
- offset = try std.leb.readUleb128(u64, reader);
+ offset = try reader.takeLeb128(u64);
},
macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
name_buf.clearRetainingCapacity();
- try reader.readUntilDelimiterArrayList(&name_buf, 0, std.math.maxInt(u32));
- try name_buf.append(0);
+ try name_buf.appendSlice(try reader.takeDelimiterInclusive(0));
},
macho.BIND_OPCODE_SET_ADDEND_SLEB => {
- addend = try std.leb.readIleb128(i64, reader);
+ addend = try reader.takeLeb128(i64);
},
macho.BIND_OPCODE_ADD_ADDR_ULEB => {
- const x = try std.leb.readUleb128(u64, reader);
+ const x = try reader.takeLeb128(u64);
offset = @intCast(@as(i64, @intCast(offset)) + @as(i64, @bitCast(x)));
},
macho.BIND_OPCODE_DO_BIND,
@@ -1397,14 +1393,14 @@ const MachODumper = struct {
switch (opc) {
macho.BIND_OPCODE_DO_BIND => {},
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB => {
- add_addr = try std.leb.readUleb128(u64, reader);
+ add_addr = try reader.takeLeb128(u64);
},
macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED => {
add_addr = imm * @sizeOf(u64);
},
macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB => {
- count = try std.leb.readUleb128(u64, reader);
- skip = try std.leb.readUleb128(u64, reader);
+ count = try reader.takeLeb128(u64);
+ skip = try reader.takeLeb128(u64);
},
else => unreachable,
}
@@ -1621,8 +1617,9 @@ const MachODumper = struct {
var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr };
try ctx.parse();
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.headers => {
@@ -1787,8 +1784,9 @@ const ElfDumper = struct {
try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size });
}
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.archive_symtab => if (ctx.symtab.items.len > 0) {
@@ -1944,8 +1942,9 @@ const ElfDumper = struct {
else => {},
};
- var output = std.array_list.Managed(u8).init(gpa);
- const writer = output.writer();
+ var output: std.Io.Writer.Allocating = .init(gpa);
+ defer output.deinit();
+ const writer = &output.writer;
switch (check.kind) {
.headers => {
@@ -2398,10 +2397,10 @@ const WasmDumper = struct {
return error.UnsupportedWasmVersion;
}
- var output = std.array_list.Managed(u8).init(gpa);
+ var output: std.Io.Writer.Allocating = .init(gpa);
defer output.deinit();
- parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
- error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
+ parseAndDumpInner(step, check, bytes, &fbs, &output.writer) catch |err| switch (err) {
+ error.EndOfStream => try output.writer.writeAll("\n<UnexpectedEndOfStream>"),
else => |e| return e,
};
return output.toOwnedSlice();
@@ -2412,10 +2411,9 @@ const WasmDumper = struct {
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
- output: *std.array_list.Managed(u8),
+ writer: *std.Io.Writer,
) !void {
const reader = fbs.reader();
- const writer = output.writer();
switch (check.kind) {
.headers => {
diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index 7cce6397bd..9c91e1159d 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -144,61 +144,6 @@ pub fn GenericReader(
return @errorCast(self.any().readAllAlloc(allocator, max_size));
}
- pub inline fn readUntilDelimiterArrayList(
- self: Self,
- array_list: *std.array_list.Managed(u8),
- delimiter: u8,
- max_size: usize,
- ) (NoEofError || Allocator.Error || error{StreamTooLong})!void {
- return @errorCast(self.any().readUntilDelimiterArrayList(
- array_list,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiterAlloc(
- self: Self,
- allocator: Allocator,
- delimiter: u8,
- max_size: usize,
- ) (NoEofError || Allocator.Error || error{StreamTooLong})![]u8 {
- return @errorCast(self.any().readUntilDelimiterAlloc(
- allocator,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiter(
- self: Self,
- buf: []u8,
- delimiter: u8,
- ) (NoEofError || error{StreamTooLong})![]u8 {
- return @errorCast(self.any().readUntilDelimiter(buf, delimiter));
- }
-
- pub inline fn readUntilDelimiterOrEofAlloc(
- self: Self,
- allocator: Allocator,
- delimiter: u8,
- max_size: usize,
- ) (Error || Allocator.Error || error{StreamTooLong})!?[]u8 {
- return @errorCast(self.any().readUntilDelimiterOrEofAlloc(
- allocator,
- delimiter,
- max_size,
- ));
- }
-
- pub inline fn readUntilDelimiterOrEof(
- self: Self,
- buf: []u8,
- delimiter: u8,
- ) (Error || error{StreamTooLong})!?[]u8 {
- return @errorCast(self.any().readUntilDelimiterOrEof(buf, delimiter));
- }
-
pub inline fn streamUntilDelimiter(
self: Self,
writer: anytype,
@@ -326,103 +271,8 @@ pub fn GenericReader(
};
}
-/// Deprecated in favor of `Writer`.
-pub fn GenericWriter(
- comptime Context: type,
- comptime WriteError: type,
- comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize,
-) type {
- return struct {
- context: Context,
-
- const Self = @This();
- pub const Error = WriteError;
-
- pub inline fn write(self: Self, bytes: []const u8) Error!usize {
- return writeFn(self.context, bytes);
- }
-
- pub inline fn writeAll(self: Self, bytes: []const u8) Error!void {
- return @errorCast(self.any().writeAll(bytes));
- }
-
- pub inline fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
- return @errorCast(self.any().print(format, args));
- }
-
- pub inline fn writeByte(self: Self, byte: u8) Error!void {
- return @errorCast(self.any().writeByte(byte));
- }
-
- pub inline fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
- return @errorCast(self.any().writeByteNTimes(byte, n));
- }
-
- pub inline fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) Error!void {
- return @errorCast(self.any().writeBytesNTimes(bytes, n));
- }
-
- pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
- return @errorCast(self.any().writeInt(T, value, endian));
- }
-
- pub inline fn writeStruct(self: Self, value: anytype) Error!void {
- return @errorCast(self.any().writeStruct(value));
- }
-
- pub inline fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) Error!void {
- return @errorCast(self.any().writeStructEndian(value, endian));
- }
-
- pub inline fn any(self: *const Self) AnyWriter {
- return .{
- .context = @ptrCast(&self.context),
- .writeFn = typeErasedWriteFn,
- };
- }
-
- fn typeErasedWriteFn(context: *const anyopaque, bytes: []const u8) anyerror!usize {
- const ptr: *const Context = @ptrCast(@alignCast(context));
- return writeFn(ptr.*, bytes);
- }
-
- /// Helper for bridging to the new `Writer` API while upgrading.
- pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
- return .{
- .derp_writer = self.*,
- .new_interface = .{
- .buffer = buffer,
- .vtable = &.{ .drain = Adapter.drain },
- },
- };
- }
-
- pub const Adapter = struct {
- derp_writer: Self,
- new_interface: Writer,
- err: ?Error = null,
-
- fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
- _ = splat;
- const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
- const buffered = w.buffered();
- if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
- a.err = err;
- return error.WriteFailed;
- });
- return a.derp_writer.write(data[0]) catch |err| {
- a.err = err;
- return error.WriteFailed;
- };
- }
- };
- };
-}
-
/// Deprecated in favor of `Reader`.
pub const AnyReader = @import("Io/DeprecatedReader.zig");
-/// Deprecated in favor of `Writer`.
-pub const AnyWriter = @import("Io/DeprecatedWriter.zig");
/// Deprecated in favor of `Reader`.
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
/// Deprecated in favor of `Reader`.
@@ -434,19 +284,6 @@ pub const countingReader = @import("Io/counting_reader.zig").countingReader;
pub const tty = @import("Io/tty.zig");
-/// Deprecated in favor of `Writer.Discarding`.
-pub const null_writer: NullWriter = .{ .context = {} };
-/// Deprecated in favor of `Writer.Discarding`.
-pub const NullWriter = GenericWriter(void, error{}, dummyWrite);
-fn dummyWrite(context: void, data: []const u8) error{}!usize {
- _ = context;
- return data.len;
-}
-
-test null_writer {
- null_writer.writeAll("yay" ** 10) catch |err| switch (err) {};
-}
-
pub fn poll(
gpa: Allocator,
comptime StreamEnum: type,
diff --git a/lib/std/Io/DeprecatedReader.zig b/lib/std/Io/DeprecatedReader.zig
index 4dfbdd19ee..0505c6be66 100644
--- a/lib/std/Io/DeprecatedReader.zig
+++ b/lib/std/Io/DeprecatedReader.zig
@@ -93,100 +93,6 @@ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyer
return try array_list.toOwnedSlice();
}
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Replaces the `std.array_list.Managed` contents by reading from the stream until `delimiter` is found.
-/// Does not include the delimiter in the result.
-/// If the `std.array_list.Managed` length would exceed `max_size`, `error.StreamTooLong` is returned and the
-/// `std.array_list.Managed` is populated with `max_size` bytes from the stream.
-pub fn readUntilDelimiterArrayList(
- self: Self,
- array_list: *std.array_list.Managed(u8),
- delimiter: u8,
- max_size: usize,
-) anyerror!void {
- array_list.shrinkRetainingCapacity(0);
- try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Allocates enough memory to read until `delimiter`. If the allocated
-/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterAlloc(
- self: Self,
- allocator: mem.Allocator,
- delimiter: u8,
- max_size: usize,
-) anyerror![]u8 {
- var array_list = std.array_list.Managed(u8).init(allocator);
- defer array_list.deinit();
- try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
- return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, `error.EndOfStream` is returned.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
- var fbs = std.io.fixedBufferStream(buf);
- try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
- const output = fbs.getWritten();
- buf[output.len] = delimiter; // emulating old behaviour
- return output;
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
-/// Allocates enough memory to read until `delimiter` or end-of-stream.
-/// If the allocated memory would be greater than `max_size`, returns
-/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
-/// of the stream. If this function is called again after that, returns
-/// null.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterOrEofAlloc(
- self: Self,
- allocator: mem.Allocator,
- delimiter: u8,
- max_size: usize,
-) anyerror!?[]u8 {
- var array_list = std.array_list.Managed(u8).init(allocator);
- defer array_list.deinit();
- self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
- error.EndOfStream => if (array_list.items.len == 0) {
- return null;
- },
- else => |e| return e,
- };
- return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, returns the rest of the stream. If this
-/// function is called again after that, returns null.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
- var fbs = std.io.fixedBufferStream(buf);
- self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
- error.EndOfStream => if (fbs.getWritten().len == 0) {
- return null;
- },
-
- else => |e| return e,
- };
- const output = fbs.getWritten();
- buf[output.len] = delimiter; // emulating old behaviour
- return output;
-}
-
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
@@ -384,7 +290,3 @@ const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;
-
-test {
- _ = @import("Reader/test.zig");
-}
diff --git a/lib/std/Io/DeprecatedWriter.zig b/lib/std/Io/DeprecatedWriter.zig
deleted file mode 100644
index 68b21bde5b..0000000000
--- a/lib/std/Io/DeprecatedWriter.zig
+++ /dev/null
@@ -1,114 +0,0 @@
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const mem = std.mem;
-const native_endian = @import("builtin").target.cpu.arch.endian();
-
-context: *const anyopaque,
-writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
-
-const Self = @This();
-pub const Error = anyerror;
-
-pub fn write(self: Self, bytes: []const u8) anyerror!usize {
- return self.writeFn(self.context, bytes);
-}
-
-pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
- var index: usize = 0;
- while (index != bytes.len) {
- index += try self.write(bytes[index..]);
- }
-}
-
-pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
- return std.fmt.format(self, format, args);
-}
-
-pub fn writeByte(self: Self, byte: u8) anyerror!void {
- const array = [1]u8{byte};
- return self.writeAll(&array);
-}
-
-pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
- var bytes: [256]u8 = undefined;
- @memset(bytes[0..], byte);
-
- var remaining: usize = n;
- while (remaining > 0) {
- const to_write = @min(remaining, bytes.len);
- try self.writeAll(bytes[0..to_write]);
- remaining -= to_write;
- }
-}
-
-pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
- var i: usize = 0;
- while (i < n) : (i += 1) {
- try self.writeAll(bytes);
- }
-}
-
-pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
- var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
- mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
- return self.writeAll(&bytes);
-}
-
-pub fn writeStruct(self: Self, value: anytype) anyerror!void {
- // Only extern and packed structs have defined in-memory layout.
- comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
- return self.writeAll(mem.asBytes(&value));
-}
-
-pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
- // TODO: make sure this value is not a reference type
- if (native_endian == endian) {
- return self.writeStruct(value);
- } else {
- var copy = value;
- mem.byteSwapAllFields(@TypeOf(value), &copy);
- return self.writeStruct(copy);
- }
-}
-
-pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
- // TODO: figure out how to adjust std lib abstractions so that this ends up
- // doing sendfile or maybe even copy_file_range under the right conditions.
- var buf: [4000]u8 = undefined;
- while (true) {
- const n = try file.readAll(&buf);
- try self.writeAll(buf[0..n]);
- if (n < buf.len) return;
- }
-}
-
-/// Helper for bridging to the new `Writer` API while upgrading.
-pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
- return .{
- .derp_writer = self.*,
- .new_interface = .{
- .buffer = buffer,
- .vtable = &.{ .drain = Adapter.drain },
- },
- };
-}
-
-pub const Adapter = struct {
- derp_writer: Self,
- new_interface: std.io.Writer,
- err: ?Error = null,
-
- fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
- _ = splat;
- const a: *@This() = @alignCast(@fieldParentPtr("new_interface", w));
- const buffered = w.buffered();
- if (buffered.len != 0) return w.consume(a.derp_writer.write(buffered) catch |err| {
- a.err = err;
- return error.WriteFailed;
- });
- return a.derp_writer.write(data[0]) catch |err| {
- a.err = err;
- return error.WriteFailed;
- };
- }
-};
diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig
index f6cb22cb4b..4cad73e8ce 100644
--- a/lib/std/Io/Reader.zig
+++ b/lib/std/Io/Reader.zig
@@ -143,8 +143,8 @@ pub const failing: Reader = .{
/// This is generally safe to `@constCast` because it has an empty buffer, so
/// there is not really a way to accidentally attempt mutation of these fields.
-const ending_state: Reader = .fixed(&.{});
-pub const ending: *Reader = @constCast(&ending_state);
+pub const ending_instance: Reader = .fixed(&.{});
+pub const ending: *Reader = @constCast(&ending_instance);
pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
return .init(r, limit, buffer);
@@ -784,7 +784,7 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
}
/// Returns a slice of the next bytes of buffered data from the stream until
-/// `delimiter` is found, advancing the seek position.
+/// `delimiter` is found, advancing the seek position up to the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
@@ -815,6 +815,37 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
}
/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, advancing the seek position past the delimiter.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
+/// to a delimiter, unless it would result in a length 0 return value, in which
+/// case `null` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
+/// such case, the stream state is unmodified as if this function was never
+/// called.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `takeDelimiterInclusive`
+/// * `takeDelimiterExclusive`
+pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
+ const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+ error.EndOfStream => {
+ const remaining = r.buffer[r.seek..r.end];
+ if (remaining.len == 0) return null;
+ r.toss(remaining.len);
+ return remaining;
+ },
+ else => |e| return e,
+ };
+ r.toss(result.len + 1);
+ return result[0 .. result.len - 1];
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
@@ -846,6 +877,8 @@ pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Appends to `w` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
+/// Does not discard the delimiter from the `Reader`.
+///
/// Returns number of bytes streamed, which may be zero, or error.EndOfStream
/// if the delimiter was not found.
///
@@ -899,6 +932,8 @@ pub const StreamDelimiterLimitError = error{
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
+/// Does not discard the delimiter from the `Reader`.
+///
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
@@ -1128,7 +1163,11 @@ pub inline fn takeStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
.@"struct" => |info| switch (info.layout) {
.auto => @compileError("ill-defined memory layout"),
.@"extern" => {
- var res = (try r.takeStructPointer(T)).*;
+ // This code works around https://github.com/ziglang/zig/issues/25067
+ // by avoiding a call to `peekStructPointer`.
+ const struct_bytes = try r.takeArray(@sizeOf(T));
+ var res: T = undefined;
+ @memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
return res;
},
@@ -1153,7 +1192,11 @@ pub inline fn peekStruct(r: *Reader, comptime T: type, endian: std.builtin.Endia
.@"struct" => |info| switch (info.layout) {
.auto => @compileError("ill-defined memory layout"),
.@"extern" => {
- var res = (try r.peekStructPointer(T)).*;
+ // This code works around https://github.com/ziglang/zig/issues/25067
+ // by avoiding a call to `peekStructPointer`.
+ const struct_bytes = try r.peekArray(@sizeOf(T));
+ var res: T = undefined;
+ @memcpy(@as([]u8, @ptrCast(&res)), struct_bytes);
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
return res;
},
diff --git a/lib/std/Io/Reader/test.zig b/lib/std/Io/Reader/test.zig
deleted file mode 100644
index 90f9539ae8..0000000000
--- a/lib/std/Io/Reader/test.zig
+++ /dev/null
@@ -1,351 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("../../std.zig");
-const testing = std.testing;
-
-test "Reader" {
- var buf = "a\x02".*;
- var fis = std.io.fixedBufferStream(&buf);
- const reader = fis.reader();
- try testing.expect((try reader.readByte()) == 'a');
- try testing.expect((try reader.readEnum(enum(u8) {
- a = 0,
- b = 99,
- c = 2,
- d = 3,
- }, builtin.cpu.arch.endian())) == .c);
- try testing.expectError(error.EndOfStream, reader.readByte());
-}
-
-test "isBytes" {
- var fis = std.io.fixedBufferStream("foobar");
- const reader = fis.reader();
- try testing.expectEqual(true, try reader.isBytes("foo"));
- try testing.expectEqual(false, try reader.isBytes("qux"));
-}
-
-test "skipBytes" {
- var fis = std.io.fixedBufferStream("foobar");
- const reader = fis.reader();
- try reader.skipBytes(3, .{});
- try testing.expect(try reader.isBytes("bar"));
- try reader.skipBytes(0, .{});
- try testing.expectError(error.EndOfStream, reader.skipBytes(1, .{}));
-}
-
-test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("0000", list.items);
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("1234", list.items);
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
-}
-
-test "readUntilDelimiterArrayList returns an empty ArrayList" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("", list.items);
-}
-
-test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterArrayList(&list, '\n', 5));
- try std.testing.expectEqualStrings("12345", list.items);
- try reader.readUntilDelimiterArrayList(&list, '\n', 5);
- try std.testing.expectEqualStrings("67", list.items);
-}
-
-test "readUntilDelimiterArrayList returns EndOfStream" {
- const a = std.testing.allocator;
- var list = std.array_list.Managed(u8).init(a);
- defer list.deinit();
-
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
- try std.testing.expectEqualStrings("1234", list.items);
-}
-
-test "readUntilDelimiterAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("0000", result);
- }
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("1234", result);
- }
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
-}
-
-test "readUntilDelimiterAlloc returns an empty ArrayList" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- {
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("", result);
- }
-}
-
-test "readUntilDelimiterAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterAlloc(a, '\n', 5));
-
- const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
- defer a.free(result);
- try std.testing.expectEqualStrings("67", result);
-}
-
-test "readUntilDelimiterAlloc returns EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
-
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
-}
-
-test "readUntilDelimiter returns bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("0000", try reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("67", try reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns EndOfStream" {
- {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("");
- const reader = fis.reader();
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
- }
- {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
- }
-}
-
-test "readUntilDelimiter returns bytes read until delimiter, then EndOfStream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter returns StreamTooLong, then EndOfStream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
-}
-
-test "readUntilDelimiter writes all bytes read to the output buffer" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n12345");
- const reader = fis.reader();
- _ = try reader.readUntilDelimiter(&buf, '\n');
- try std.testing.expectEqualStrings("0000\n", &buf);
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
- try std.testing.expectEqualStrings("12345", &buf);
-}
-
-test "readUntilDelimiterOrEofAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("0000", result);
- }
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("1234", result);
- }
-
- try std.testing.expect((try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)) == null);
-}
-
-test "readUntilDelimiterOrEofAlloc returns an empty ArrayList" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
-
- {
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("", result);
- }
-}
-
-test "readUntilDelimiterOrEofAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
- const a = std.testing.allocator;
-
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
-
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEofAlloc(a, '\n', 5));
-
- const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
- defer a.free(result);
- try std.testing.expectEqualStrings("67", result);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("0000", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then an empty string" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("12345\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until the delimiter" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567\n");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns null" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("");
- const reader = fis.reader();
- try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until delimiter, then null" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234\n");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
- try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
-}
-
-test "readUntilDelimiterOrEof returns bytes read until end-of-stream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234");
- const reader = fis.reader();
- try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until end-of-stream" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("1234567");
- const reader = fis.reader();
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
-}
-
-test "readUntilDelimiterOrEof writes all bytes read to the output buffer" {
- var buf: [5]u8 = undefined;
- var fis = std.io.fixedBufferStream("0000\n12345");
- const reader = fis.reader();
- _ = try reader.readUntilDelimiterOrEof(&buf, '\n');
- try std.testing.expectEqualStrings("0000\n", &buf);
- try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
- try std.testing.expectEqualStrings("12345", &buf);
-}
-
-test "streamUntilDelimiter writes all bytes without delimiter to the output" {
- const input_string = "some_string_with_delimiter!";
- var input_fbs = std.io.fixedBufferStream(input_string);
- const reader = input_fbs.reader();
-
- var output: [input_string.len]u8 = undefined;
- var output_fbs = std.io.fixedBufferStream(&output);
- const writer = output_fbs.writer();
-
- try reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len);
- try std.testing.expectEqualStrings("some_string_with_delimiter", output_fbs.getWritten());
- try std.testing.expectError(error.EndOfStream, reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len));
-
- input_fbs.reset();
- output_fbs.reset();
-
- try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
-}
diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig
index 707ed9cb94..384faff62e 100644
--- a/lib/std/Io/Writer.zig
+++ b/lib/std/Io/Writer.zig
@@ -8,6 +8,7 @@ const Limit = std.Io.Limit;
const File = std.fs.File;
const testing = std.testing;
const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
vtable: *const VTable,
/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
@@ -2374,6 +2375,29 @@ pub fn unreachableRebase(w: *Writer, preserve: usize, capacity: usize) Error!voi
unreachable;
}
+pub fn fromArrayList(array_list: *ArrayList(u8)) Writer {
+ defer array_list.* = .empty;
+ return .{
+ .vtable = &.{
+ .drain = fixedDrain,
+ .flush = noopFlush,
+ .rebase = failingRebase,
+ },
+ .buffer = array_list.allocatedSlice(),
+ .end = array_list.items.len,
+ };
+}
+
+pub fn toArrayList(w: *Writer) ArrayList(u8) {
+ const result: ArrayList(u8) = .{
+ .items = w.buffer[0..w.end],
+ .capacity = w.buffer.len,
+ };
+ w.buffer = &.{};
+ w.end = 0;
+ return result;
+}
+
/// Provides a `Writer` implementation based on calling `Hasher.update`, sending
/// all data also to an underlying `Writer`.
///
@@ -2546,7 +2570,7 @@ pub const Allocating = struct {
}
/// Replaces `array_list` with empty, taking ownership of the memory.
- pub fn fromArrayList(allocator: Allocator, array_list: *std.ArrayListUnmanaged(u8)) Allocating {
+ pub fn fromArrayList(allocator: Allocator, array_list: *ArrayList(u8)) Allocating {
defer array_list.* = .empty;
return .{
.allocator = allocator,
@@ -2572,9 +2596,9 @@ pub const Allocating = struct {
/// Returns an array list that takes ownership of the allocated memory.
/// Resets the `Allocating` to an empty state.
- pub fn toArrayList(a: *Allocating) std.ArrayListUnmanaged(u8) {
+ pub fn toArrayList(a: *Allocating) ArrayList(u8) {
const w = &a.writer;
- const result: std.ArrayListUnmanaged(u8) = .{
+ const result: ArrayList(u8) = .{
.items = w.buffer[0..w.end],
.capacity = w.buffer.len,
};
@@ -2603,7 +2627,7 @@ pub const Allocating = struct {
pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
const gpa = a.allocator;
- var list = toArrayList(a);
+ var list = @This().toArrayList(a);
defer a.setArrayList(list);
return list.toOwnedSliceSentinel(gpa, sentinel);
}
@@ -2670,7 +2694,7 @@ pub const Allocating = struct {
list.ensureUnusedCapacity(gpa, minimum_len) catch return error.WriteFailed;
}
- fn setArrayList(a: *Allocating, list: std.ArrayListUnmanaged(u8)) void {
+ fn setArrayList(a: *Allocating, list: ArrayList(u8)) void {
a.writer.buffer = list.allocatedSlice();
a.writer.end = list.items.len;
}
diff --git a/lib/std/Io/fixed_buffer_stream.zig b/lib/std/Io/fixed_buffer_stream.zig
index c284b9baf4..a960f21ce6 100644
--- a/lib/std/Io/fixed_buffer_stream.zig
+++ b/lib/std/Io/fixed_buffer_stream.zig
@@ -17,7 +17,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
pub const GetSeekPosError = error{};
pub const Reader = io.GenericReader(*Self, ReadError, read);
- pub const Writer = io.GenericWriter(*Self, WriteError, write);
const Self = @This();
@@ -25,10 +24,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return .{ .context = self };
}
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
pub fn read(self: *Self, dest: []u8) ReadError!usize {
const size = @min(dest.len, self.buffer.len - self.pos);
const end = self.pos + size;
@@ -39,23 +34,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return size;
}
- /// If the returned number of bytes written is less than requested, the
- /// buffer is full. Returns `error.NoSpaceLeft` when no bytes would be written.
- /// Note: `error.NoSpaceLeft` matches the corresponding error from
- /// `std.fs.File.WriteError`.
- pub fn write(self: *Self, bytes: []const u8) WriteError!usize {
- if (bytes.len == 0) return 0;
- if (self.pos >= self.buffer.len) return error.NoSpaceLeft;
-
- const n = @min(self.buffer.len - self.pos, bytes.len);
- @memcpy(self.buffer[self.pos..][0..n], bytes[0..n]);
- self.pos += n;
-
- if (n == 0) return error.NoSpaceLeft;
-
- return n;
- }
-
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
}
@@ -84,10 +62,6 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
return self.pos;
}
- pub fn getWritten(self: Self) Buffer {
- return self.buffer[0..self.pos];
- }
-
pub fn reset(self: *Self) void {
self.pos = 0;
}
@@ -117,49 +91,6 @@ fn Slice(comptime T: type) type {
}
}
-test "output" {
- var buf: [255]u8 = undefined;
- var fbs = fixedBufferStream(&buf);
- const stream = fbs.writer();
-
- try stream.print("{s}{s}!", .{ "Hello", "World" });
- try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
-}
-
-test "output at comptime" {
- comptime {
- var buf: [255]u8 = undefined;
- var fbs = fixedBufferStream(&buf);
- const stream = fbs.writer();
-
- try stream.print("{s}{s}!", .{ "Hello", "World" });
- try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
- }
-}
-
-test "output 2" {
- var buffer: [10]u8 = undefined;
- var fbs = fixedBufferStream(&buffer);
-
- try fbs.writer().writeAll("Hello");
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
-
- try fbs.writer().writeAll("world");
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
-
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
-
- fbs.reset();
- try testing.expect(fbs.getWritten().len == 0);
-
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
- try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
-
- try fbs.seekTo((try fbs.getEndPos()) + 1);
- try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
-}
-
test "input" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
var fbs = fixedBufferStream(&bytes);
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index e6d1c79d01..5cf6b173fb 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -167,7 +167,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
defer file.close();
- try file.deprecatedWriter().writeAll(name);
+ try file.writeAll(name);
return;
},
.windows => {
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 486deffafd..a0b7614d20 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -336,39 +336,6 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
try unmanaged.print(gpa, fmt, args);
}
- pub const Writer = if (T != u8) void else std.io.GenericWriter(*Self, Allocator.Error, appendWrite);
-
- /// Initializes a Writer which will append to the list.
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
-
- /// Same as `append` except it returns the number of bytes written, which is always the same
- /// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
- /// Invalidates element pointers if additional memory is needed.
- fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
- try self.appendSlice(m);
- return m.len;
- }
-
- pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
-
- /// Initializes a Writer which will append to the list but will return
- /// `error.OutOfMemory` rather than increasing capacity.
- pub fn fixedWriter(self: *Self) FixedWriter {
- return .{ .context = self };
- }
-
- /// The purpose of this function existing is to match `std.io.GenericWriter` API.
- fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
- const available_capacity = self.capacity - self.items.len;
- if (m.len > available_capacity)
- return error.OutOfMemory;
-
- self.appendSliceAssumeCapacity(m);
- return m.len;
- }
-
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@@ -1083,48 +1050,6 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
self.items.len += w.end;
}
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const WriterContext = struct {
- self: *Self,
- allocator: Allocator,
- };
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const Writer = if (T != u8)
- @compileError("The Writer interface is only defined for ArrayList(u8) " ++
- "but the given type is ArrayList(" ++ @typeName(T) ++ ")")
- else
- std.io.GenericWriter(WriterContext, Allocator.Error, appendWrite);
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub fn writer(self: *Self, gpa: Allocator) Writer {
- return .{ .context = .{ .self = self, .allocator = gpa } };
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
- try context.self.appendSlice(context.allocator, m);
- return m.len;
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- pub fn fixedWriter(self: *Self) FixedWriter {
- return .{ .context = self };
- }
-
- /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
- fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
- const available_capacity = self.capacity - self.items.len;
- if (m.len > available_capacity)
- return error.OutOfMemory;
-
- self.appendSliceAssumeCapacity(m);
- return m.len;
- }
-
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@@ -2116,60 +2041,6 @@ test "Managed(T) of struct T" {
}
}
-test "Managed(u8) implements writer" {
- const a = testing.allocator;
-
- {
- var buffer = Managed(u8).init(a);
- defer buffer.deinit();
-
- const x: i32 = 42;
- const y: i32 = 1234;
- try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
-
- try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
- }
- {
- var list = AlignedManaged(u8, .@"2").init(a);
- defer list.deinit();
-
- const writer = list.writer();
- try writer.writeAll("a");
- try writer.writeAll("bc");
- try writer.writeAll("d");
- try writer.writeAll("efg");
-
- try testing.expectEqualSlices(u8, list.items, "abcdefg");
- }
-}
-
-test "ArrayList(u8) implements writer" {
- const a = testing.allocator;
-
- {
- var buffer: ArrayList(u8) = .empty;
- defer buffer.deinit(a);
-
- const x: i32 = 42;
- const y: i32 = 1234;
- try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y });
-
- try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
- }
- {
- var list: Aligned(u8, .@"2") = .empty;
- defer list.deinit(a);
-
- const writer = list.writer(a);
- try writer.writeAll("a");
- try writer.writeAll("bc");
- try writer.writeAll("d");
- try writer.writeAll("efg");
-
- try testing.expectEqualSlices(u8, list.items, "abcdefg");
- }
-}
-
test "shrink still sets length when resizing is disabled" {
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
const a = failing_allocator.allocator();
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index 15e48b5c51..8c08fd6786 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -108,8 +108,7 @@ pub const Base64Encoder = struct {
}
}
- // dest must be compatible with std.io.GenericWriter's writeAll interface
- pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
+ pub fn encodeWriter(encoder: *const Base64Encoder, dest: *std.Io.Writer, source: []const u8) !void {
var chunker = window(u8, source, 3, 3);
while (chunker.next()) |chunk| {
var temp: [5]u8 = undefined;
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index 1065c4fc21..5c81b9640c 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -801,18 +801,6 @@ fn AegisMac(comptime T: type) type {
ctx.update(msg);
ctx.final(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Mac, Error, write);
-
- fn write(self: *Mac, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Mac) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index cd40978cf3..28754b4cae 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -185,18 +185,6 @@ pub fn Blake2s(comptime out_bits: usize) type {
r.* ^= v[i] ^ v[i + 8];
}
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index bc771524e8..a840a30632 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -474,18 +474,6 @@ pub const Blake3 = struct {
}
output.rootOutputBytes(out_slice);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Blake3, Error, write);
-
- fn write(self: *Blake3, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Blake3) Writer {
- return .{ .context = self };
- }
};
// Use named type declarations to workaround crash with anonymous structs (issue #4373).
diff --git a/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig b/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
index b6c0ab20d4..b761a93345 100644
--- a/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
+++ b/lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
@@ -4,6 +4,12 @@
//! Laid out in memory like:
//! capacity |--------------------------|
//! data |-------------|
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const testing = std.testing;
+
data: []u8,
capacity: usize,
allocator: Allocator,
@@ -45,12 +51,6 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
self.data.ptr = begin;
}
-pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
-/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
-pub fn writer(self: *ArrayListReverse) Writer {
- return .{ .context = self };
-}
-
fn prependSliceSize(self: *ArrayListReverse, data: []const u8) Error!usize {
try self.prependSlice(data);
return data.len;
@@ -77,11 +77,6 @@ pub fn toOwnedSlice(self: *ArrayListReverse) Error![]u8 {
return new_memory;
}
-const std = @import("std");
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const testing = std.testing;
-
test ArrayListReverse {
var b = ArrayListReverse.init(testing.allocator);
defer b.deinit();
diff --git a/lib/std/crypto/ml_kem.zig b/lib/std/crypto/ml_kem.zig
index ce3edf9eb5..c7ad23d3e2 100644
--- a/lib/std/crypto/ml_kem.zig
+++ b/lib/std/crypto/ml_kem.zig
@@ -1721,53 +1721,55 @@ test "Test happy flow" {
// Code to test NIST Known Answer Tests (KAT), see PQCgenKAT.c.
-const sha2 = crypto.hash.sha2;
-
-test "NIST KAT test" {
- inline for (.{
- .{ d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547" },
- .{ d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5" },
- .{ d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2" },
- }) |modeHash| {
- const mode = modeHash[0];
- var seed: [48]u8 = undefined;
- for (&seed, 0..) |*s, i| {
- s.* = @as(u8, @intCast(i));
- }
- var f = sha2.Sha256.init(.{});
- const fw = f.writer();
- var g = NistDRBG.init(seed);
- try std.fmt.format(fw, "# {s}\n\n", .{mode.name});
- for (0..100) |i| {
- g.fill(&seed);
- try std.fmt.format(fw, "count = {}\n", .{i});
- try std.fmt.format(fw, "seed = {X}\n", .{&seed});
- var g2 = NistDRBG.init(seed);
-
- // This is not equivalent to g2.fill(kseed[:]). As the reference
- // implementation calls randombytes twice generating the keypair,
- // we have to do that as well.
- var kseed: [64]u8 = undefined;
- var eseed: [32]u8 = undefined;
- g2.fill(kseed[0..32]);
- g2.fill(kseed[32..64]);
- g2.fill(&eseed);
- const kp = try mode.KeyPair.generateDeterministic(kseed);
- const e = kp.public_key.encaps(eseed);
- const ss2 = try kp.secret_key.decaps(&e.ciphertext);
- try testing.expectEqual(ss2, e.shared_secret);
- try std.fmt.format(fw, "pk = {X}\n", .{&kp.public_key.toBytes()});
- try std.fmt.format(fw, "sk = {X}\n", .{&kp.secret_key.toBytes()});
- try std.fmt.format(fw, "ct = {X}\n", .{&e.ciphertext});
- try std.fmt.format(fw, "ss = {X}\n\n", .{&e.shared_secret});
- }
+test "NIST KAT test d00.Kyber512" {
+ try testNistKat(d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547");
+}
- var out: [32]u8 = undefined;
- f.final(&out);
- var outHex: [64]u8 = undefined;
- _ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
- try testing.expectEqual(outHex, modeHash[1].*);
+test "NIST KAT test d00.Kyber1024" {
+ try testNistKat(d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5");
+}
+
+test "NIST KAT test d00.Kyber768" {
+ try testNistKat(d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2");
+}
+
+fn testNistKat(mode: type, hash: []const u8) !void {
+ var seed: [48]u8 = undefined;
+ for (&seed, 0..) |*s, i| {
+ s.* = @as(u8, @intCast(i));
}
+ var fw: std.Io.Writer.Hashing(crypto.hash.sha2.Sha256) = .init(&.{});
+ var g = NistDRBG.init(seed);
+ try fw.writer.print("# {s}\n\n", .{mode.name});
+ for (0..100) |i| {
+ g.fill(&seed);
+ try fw.writer.print("count = {}\n", .{i});
+ try fw.writer.print("seed = {X}\n", .{&seed});
+ var g2 = NistDRBG.init(seed);
+
+ // This is not equivalent to g2.fill(kseed[:]). As the reference
+ // implementation calls randombytes twice generating the keypair,
+ // we have to do that as well.
+ var kseed: [64]u8 = undefined;
+ var eseed: [32]u8 = undefined;
+ g2.fill(kseed[0..32]);
+ g2.fill(kseed[32..64]);
+ g2.fill(&eseed);
+ const kp = try mode.KeyPair.generateDeterministic(kseed);
+ const e = kp.public_key.encaps(eseed);
+ const ss2 = try kp.secret_key.decaps(&e.ciphertext);
+ try testing.expectEqual(ss2, e.shared_secret);
+ try fw.writer.print("pk = {X}\n", .{&kp.public_key.toBytes()});
+ try fw.writer.print("sk = {X}\n", .{&kp.secret_key.toBytes()});
+ try fw.writer.print("ct = {X}\n", .{&e.ciphertext});
+ try fw.writer.print("ss = {X}\n\n", .{&e.shared_secret});
+ }
+
+ var out: [32]u8 = undefined;
+ fw.hasher.final(&out);
+ var outHex: [64]u8 = undefined;
+ _ = try std.fmt.bufPrint(&outHex, "{x}", .{&out});
+ try testing.expectEqualStrings(&outHex, hash);
}
const NistDRBG = struct {
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index f12306a1dc..122da1bb45 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -304,31 +304,34 @@ const crypt_format = struct {
/// Serialize parameters into a string in modular crypt format.
pub fn serialize(params: anytype, str: []u8) EncodingError![]const u8 {
- var buf = io.fixedBufferStream(str);
- try serializeTo(params, buf.writer());
- return buf.getWritten();
+ var w: std.Io.Writer = .fixed(str);
+ serializeTo(params, &w) catch |err| switch (err) {
+ error.WriteFailed => return error.NoSpaceLeft,
+ else => |e| return e,
+ };
+ return w.buffered();
}
/// Compute the number of bytes required to serialize `params`
pub fn calcSize(params: anytype) usize {
var trash: [128]u8 = undefined;
var d: std.Io.Writer.Discarding = .init(&trash);
- serializeTo(params, &d) catch unreachable;
+ serializeTo(params, &d.writer) catch unreachable;
return @intCast(d.fullCount());
}
- fn serializeTo(params: anytype, out: anytype) !void {
+ fn serializeTo(params: anytype, w: *std.Io.Writer) !void {
var header: [14]u8 = undefined;
header[0..3].* = prefix.*;
Codec.intEncode(header[3..4], params.ln);
Codec.intEncode(header[4..9], params.r);
Codec.intEncode(header[9..14], params.p);
- try out.writeAll(&header);
- try out.writeAll(params.salt);
- try out.writeAll("$");
+ try w.writeAll(&header);
+ try w.writeAll(params.salt);
+ try w.writeAll("$");
var buf: [@TypeOf(params.hash).max_encoded_length]u8 = undefined;
const hash_str = try params.hash.toB64(&buf);
- try out.writeAll(hash_str);
+ try w.writeAll(hash_str);
}
/// Custom codec that maps 6 bits into 8 like regular Base64, but uses its own alphabet,
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index 1abc2b0edc..d32f15f57b 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -373,18 +373,6 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
for (&d.s, v) |*dv, vv| dv.* +%= vv;
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 78c3ff5527..84cd0c2b0e 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -80,18 +80,6 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime default_delim
self.st.pad();
self.st.squeeze(out[0..]);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -191,18 +179,6 @@ fn ShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
pub fn fillBlock(self: *Self) void {
self.st.fillBlock();
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -284,18 +260,6 @@ fn CShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
pub fn fillBlock(self: *Self) void {
self.shaker.fillBlock();
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -390,18 +354,6 @@ fn KMacLike(comptime security_level: u11, comptime default_delim: u8, comptime r
ctx.update(msg);
ctx.final(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
@@ -482,18 +434,6 @@ fn TupleHashLike(comptime security_level: u11, comptime default_delim: u8, compt
}
self.cshaker.squeeze(out);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index 4334a6912d..cf595327c5 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -238,18 +238,6 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
pub fn toInt(msg: []const u8, key: *const [key_length]u8) T {
return State.hash(msg, key);
}
-
- pub const Error = error{};
- pub const Writer = std.io.GenericWriter(*Self, Error, write);
-
- fn write(self: *Self, bytes: []const u8) Error!usize {
- self.update(bytes);
- return bytes.len;
- }
-
- pub fn writer(self: *Self) Writer {
- return .{ .context = self };
- }
};
}
diff --git a/lib/std/debug/Dwarf/expression.zig b/lib/std/debug/Dwarf/expression.zig
index c123cdb30b..1c849489ad 100644
--- a/lib/std/debug/Dwarf/expression.zig
+++ b/lib/std/debug/Dwarf/expression.zig
@@ -8,6 +8,8 @@ const OP = std.dwarf.OP;
const abi = std.debug.Dwarf.abi;
const mem = std.mem;
const assert = std.debug.assert;
+const testing = std.testing;
+const Writer = std.Io.Writer;
/// Expressions can be evaluated in different contexts, each requiring its own set of inputs.
/// Callers should specify all the fields relevant to their context. If a field is required
@@ -782,7 +784,7 @@ pub fn Builder(comptime options: Options) type {
return struct {
/// Zero-operand instructions
- pub fn writeOpcode(writer: anytype, comptime opcode: u8) !void {
+ pub fn writeOpcode(writer: *Writer, comptime opcode: u8) !void {
if (options.call_frame_context and !comptime isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
switch (opcode) {
OP.dup,
@@ -823,14 +825,14 @@ pub fn Builder(comptime options: Options) type {
}
// 2.5.1.1: Literal Encodings
- pub fn writeLiteral(writer: anytype, literal: u8) !void {
+ pub fn writeLiteral(writer: *Writer, literal: u8) !void {
switch (literal) {
0...31 => |n| try writer.writeByte(n + OP.lit0),
else => return error.InvalidLiteral,
}
}
- pub fn writeConst(writer: anytype, comptime T: type, value: T) !void {
+ pub fn writeConst(writer: *Writer, comptime T: type, value: T) !void {
if (@typeInfo(T) != .int) @compileError("Constants must be integers");
switch (T) {
@@ -852,7 +854,7 @@ pub fn Builder(comptime options: Options) type {
else => switch (@typeInfo(T).int.signedness) {
.unsigned => {
try writer.writeByte(OP.constu);
- try leb.writeUleb128(writer, value);
+ try writer.writeUleb128(value);
},
.signed => {
try writer.writeByte(OP.consts);
@@ -862,105 +864,105 @@ pub fn Builder(comptime options: Options) type {
}
}
- pub fn writeConstx(writer: anytype, debug_addr_offset: anytype) !void {
+ pub fn writeConstx(writer: *Writer, debug_addr_offset: anytype) !void {
try writer.writeByte(OP.constx);
- try leb.writeUleb128(writer, debug_addr_offset);
+ try writer.writeUleb128(debug_addr_offset);
}
- pub fn writeConstType(writer: anytype, die_offset: anytype, value_bytes: []const u8) !void {
+ pub fn writeConstType(writer: *Writer, die_offset: anytype, value_bytes: []const u8) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
if (value_bytes.len > 0xff) return error.InvalidTypeLength;
try writer.writeByte(OP.const_type);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
try writer.writeByte(@intCast(value_bytes.len));
try writer.writeAll(value_bytes);
}
- pub fn writeAddr(writer: anytype, value: addr_type) !void {
+ pub fn writeAddr(writer: *Writer, value: addr_type) !void {
try writer.writeByte(OP.addr);
try writer.writeInt(addr_type, value, options.endian);
}
- pub fn writeAddrx(writer: anytype, debug_addr_offset: anytype) !void {
+ pub fn writeAddrx(writer: *Writer, debug_addr_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.addrx);
- try leb.writeUleb128(writer, debug_addr_offset);
+ try writer.writeUleb128(debug_addr_offset);
}
// 2.5.1.2: Register Values
- pub fn writeFbreg(writer: anytype, offset: anytype) !void {
+ pub fn writeFbreg(writer: *Writer, offset: anytype) !void {
try writer.writeByte(OP.fbreg);
try leb.writeIleb128(writer, offset);
}
- pub fn writeBreg(writer: anytype, register: u8, offset: anytype) !void {
+ pub fn writeBreg(writer: *Writer, register: u8, offset: anytype) !void {
if (register > 31) return error.InvalidRegister;
try writer.writeByte(OP.breg0 + register);
try leb.writeIleb128(writer, offset);
}
- pub fn writeBregx(writer: anytype, register: anytype, offset: anytype) !void {
+ pub fn writeBregx(writer: *Writer, register: anytype, offset: anytype) !void {
try writer.writeByte(OP.bregx);
- try leb.writeUleb128(writer, register);
+ try writer.writeUleb128(register);
try leb.writeIleb128(writer, offset);
}
- pub fn writeRegvalType(writer: anytype, register: anytype, offset: anytype) !void {
+ pub fn writeRegvalType(writer: *Writer, register: anytype, offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.regval_type);
- try leb.writeUleb128(writer, register);
- try leb.writeUleb128(writer, offset);
+ try writer.writeUleb128(register);
+ try writer.writeUleb128(offset);
}
// 2.5.1.3: Stack Operations
- pub fn writePick(writer: anytype, index: u8) !void {
+ pub fn writePick(writer: *Writer, index: u8) !void {
try writer.writeByte(OP.pick);
try writer.writeByte(index);
}
- pub fn writeDerefSize(writer: anytype, size: u8) !void {
+ pub fn writeDerefSize(writer: *Writer, size: u8) !void {
try writer.writeByte(OP.deref_size);
try writer.writeByte(size);
}
- pub fn writeXDerefSize(writer: anytype, size: u8) !void {
+ pub fn writeXDerefSize(writer: *Writer, size: u8) !void {
try writer.writeByte(OP.xderef_size);
try writer.writeByte(size);
}
- pub fn writeDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
+ pub fn writeDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.deref_type);
try writer.writeByte(size);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
- pub fn writeXDerefType(writer: anytype, size: u8, die_offset: anytype) !void {
+ pub fn writeXDerefType(writer: *Writer, size: u8, die_offset: anytype) !void {
try writer.writeByte(OP.xderef_type);
try writer.writeByte(size);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
// 2.5.1.4: Arithmetic and Logical Operations
- pub fn writePlusUconst(writer: anytype, uint_value: anytype) !void {
+ pub fn writePlusUconst(writer: *Writer, uint_value: anytype) !void {
try writer.writeByte(OP.plus_uconst);
- try leb.writeUleb128(writer, uint_value);
+ try writer.writeUleb128(uint_value);
}
// 2.5.1.5: Control Flow Operations
- pub fn writeSkip(writer: anytype, offset: i16) !void {
+ pub fn writeSkip(writer: *Writer, offset: i16) !void {
try writer.writeByte(OP.skip);
try writer.writeInt(i16, offset, options.endian);
}
- pub fn writeBra(writer: anytype, offset: i16) !void {
+ pub fn writeBra(writer: *Writer, offset: i16) !void {
try writer.writeByte(OP.bra);
try writer.writeInt(i16, offset, options.endian);
}
- pub fn writeCall(writer: anytype, comptime T: type, offset: T) !void {
+ pub fn writeCall(writer: *Writer, comptime T: type, offset: T) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
switch (T) {
u16 => try writer.writeByte(OP.call2),
@@ -971,45 +973,45 @@ pub fn Builder(comptime options: Options) type {
try writer.writeInt(T, offset, options.endian);
}
- pub fn writeCallRef(writer: anytype, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
+ pub fn writeCallRef(writer: *Writer, comptime is_64: bool, value: if (is_64) u64 else u32) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.call_ref);
try writer.writeInt(if (is_64) u64 else u32, value, options.endian);
}
- pub fn writeConvert(writer: anytype, die_offset: anytype) !void {
+ pub fn writeConvert(writer: *Writer, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.convert);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
- pub fn writeReinterpret(writer: anytype, die_offset: anytype) !void {
+ pub fn writeReinterpret(writer: *Writer, die_offset: anytype) !void {
if (options.call_frame_context) return error.InvalidCFAOpcode;
try writer.writeByte(OP.reinterpret);
- try leb.writeUleb128(writer, die_offset);
+ try writer.writeUleb128(die_offset);
}
// 2.5.1.7: Special Operations
- pub fn writeEntryValue(writer: anytype, expression: []const u8) !void {
+ pub fn writeEntryValue(writer: *Writer, expression: []const u8) !void {
try writer.writeByte(OP.entry_value);
- try leb.writeUleb128(writer, expression.len);
+ try writer.writeUleb128(expression.len);
try writer.writeAll(expression);
}
// 2.6: Location Descriptions
- pub fn writeReg(writer: anytype, register: u8) !void {
+ pub fn writeReg(writer: *Writer, register: u8) !void {
try writer.writeByte(OP.reg0 + register);
}
- pub fn writeRegx(writer: anytype, register: anytype) !void {
+ pub fn writeRegx(writer: *Writer, register: anytype) !void {
try writer.writeByte(OP.regx);
- try leb.writeUleb128(writer, register);
+ try writer.writeUleb128(register);
}
- pub fn writeImplicitValue(writer: anytype, value_bytes: []const u8) !void {
+ pub fn writeImplicitValue(writer: *Writer, value_bytes: []const u8) !void {
try writer.writeByte(OP.implicit_value);
- try leb.writeUleb128(writer, value_bytes.len);
+ try writer.writeUleb128(value_bytes.len);
try writer.writeAll(value_bytes);
}
};
@@ -1042,8 +1044,7 @@ fn isOpcodeRegisterLocation(opcode: u8) bool {
};
}
-const testing = std.testing;
-test "DWARF expressions" {
+test "basics" {
const allocator = std.testing.allocator;
const options = Options{};
@@ -1052,10 +1053,10 @@ test "DWARF expressions" {
const b = Builder(options);
- var program = std.array_list.Managed(u8).init(allocator);
+ var program: std.Io.Writer.Allocating = .init(allocator);
defer program.deinit();
- const writer = program.writer();
+ const writer = &program.writer;
// Literals
{
@@ -1064,7 +1065,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, @intCast(i));
}
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
for (0..32) |i| {
const expected = 31 - i;
@@ -1108,16 +1109,16 @@ test "DWARF expressions" {
var mock_compile_unit: std.debug.Dwarf.CompileUnit = undefined;
mock_compile_unit.addr_base = 1;
- var mock_debug_addr = std.array_list.Managed(u8).init(allocator);
+ var mock_debug_addr: std.Io.Writer.Allocating = .init(allocator);
defer mock_debug_addr.deinit();
- try mock_debug_addr.writer().writeInt(u16, 0, native_endian);
- try mock_debug_addr.writer().writeInt(usize, input[11], native_endian);
- try mock_debug_addr.writer().writeInt(usize, input[12], native_endian);
+ try mock_debug_addr.writer.writeInt(u16, 0, native_endian);
+ try mock_debug_addr.writer.writeInt(usize, input[11], native_endian);
+ try mock_debug_addr.writer.writeInt(usize, input[12], native_endian);
- const context = Context{
+ const context: Context = .{
.compile_unit = &mock_compile_unit,
- .debug_addr = mock_debug_addr.items,
+ .debug_addr = mock_debug_addr.written(),
};
try b.writeConstx(writer, @as(usize, 1));
@@ -1127,7 +1128,7 @@ test "DWARF expressions" {
const type_bytes: []const u8 = &.{ 1, 2, 3, 4 };
try b.writeConstType(writer, die_offset, type_bytes);
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
const const_type = stack_machine.stack.pop().?.const_type;
try testing.expectEqual(die_offset, const_type.type_offset);
@@ -1185,7 +1186,7 @@ test "DWARF expressions" {
try b.writeBregx(writer, abi.ipRegNum(native_arch).?, @as(usize, 300));
try b.writeRegvalType(writer, @as(u8, 0), @as(usize, 400));
- _ = try stack_machine.run(program.items, allocator, context, 0);
+ _ = try stack_machine.run(program.written(), allocator, context, 0);
const regval_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(@as(usize, 400), regval_type.type_offset);
@@ -1214,7 +1215,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u8, 1);
try b.writeOpcode(writer, OP.dup);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 1), stack_machine.stack.pop().?.generic);
@@ -1222,7 +1223,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u8, 1);
try b.writeOpcode(writer, OP.drop);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expect(stack_machine.stack.pop() == null);
stack_machine.reset();
@@ -1231,7 +1232,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writePick(writer, 2);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1240,7 +1241,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.over);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1248,7 +1249,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.swap);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
@@ -1258,7 +1259,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u8, 5);
try b.writeConst(writer, u8, 6);
try b.writeOpcode(writer, OP.rot);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
@@ -1269,7 +1270,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeOpcode(writer, OP.deref);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1277,14 +1278,14 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeOpcode(writer, OP.xderef);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(deref_target, stack_machine.stack.pop().?.generic);
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeDerefSize(writer, 1);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1292,7 +1293,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeXDerefSize(writer, 1);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @as(*const u8, @ptrCast(&deref_target)).*), stack_machine.stack.pop().?.generic);
const type_offset: usize = @truncate(0xaabbaabb_aabbaabb);
@@ -1301,7 +1302,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeDerefType(writer, 1, type_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const deref_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(type_offset, deref_type.type_offset);
try testing.expectEqual(@as(u8, 1), deref_type.type_size);
@@ -1312,7 +1313,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 0);
try b.writeAddr(writer, @intFromPtr(&deref_target));
try b.writeXDerefType(writer, 1, type_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const xderef_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(type_offset, xderef_type.type_offset);
try testing.expectEqual(@as(u8, 1), xderef_type.type_size);
@@ -1323,7 +1324,7 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.push_object_address);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @intFromPtr(context.object_address.?)), stack_machine.stack.pop().?.generic);
// TODO: Test OP.form_tls_address
@@ -1333,7 +1334,7 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.call_frame_cfa);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(context.cfa.?, stack_machine.stack.pop().?.generic);
}
@@ -1345,7 +1346,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, i16, -4096);
try b.writeOpcode(writer, OP.abs);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4096), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1353,7 +1354,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff0f);
try b.writeConst(writer, u16, 0xf0ff);
try b.writeOpcode(writer, OP.@"and");
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xf00f), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1361,7 +1362,7 @@ test "DWARF expressions" {
try b.writeConst(writer, i16, -404);
try b.writeConst(writer, i16, 100);
try b.writeOpcode(writer, OP.div);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(isize, -404 / 100), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
stack_machine.reset();
@@ -1369,7 +1370,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 200);
try b.writeConst(writer, u16, 50);
try b.writeOpcode(writer, OP.minus);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 150), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1377,7 +1378,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 123);
try b.writeConst(writer, u16, 100);
try b.writeOpcode(writer, OP.mod);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 23), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1385,7 +1386,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff);
try b.writeConst(writer, u16, 0xee);
try b.writeOpcode(writer, OP.mul);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xed12), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1394,7 +1395,7 @@ test "DWARF expressions" {
try b.writeOpcode(writer, OP.neg);
try b.writeConst(writer, i16, -6);
try b.writeOpcode(writer, OP.neg);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 6), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(isize, -5), @as(isize, @bitCast(stack_machine.stack.pop().?.generic)));
@@ -1402,7 +1403,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConst(writer, u16, 0xff0f);
try b.writeOpcode(writer, OP.not);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(~@as(usize, 0xff0f), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1410,7 +1411,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xff0f);
try b.writeConst(writer, u16, 0xf0ff);
try b.writeOpcode(writer, OP.@"or");
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xffff), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1418,14 +1419,14 @@ test "DWARF expressions" {
try b.writeConst(writer, i16, 402);
try b.writeConst(writer, i16, 100);
try b.writeOpcode(writer, OP.plus);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 502), stack_machine.stack.pop().?.generic);
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeConst(writer, u16, 4096);
try b.writePlusUconst(writer, @as(usize, 8192));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 4096 + 8192), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1433,7 +1434,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shl);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xfff << 1), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1441,7 +1442,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shr);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xfff >> 1), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1449,7 +1450,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xfff);
try b.writeConst(writer, u16, 1);
try b.writeOpcode(writer, OP.shr);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, @bitCast(@as(isize, 0xfff) >> 1)), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1457,7 +1458,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 0xf0ff);
try b.writeConst(writer, u16, 0xff0f);
try b.writeOpcode(writer, OP.xor);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0x0ff0), stack_machine.stack.pop().?.generic);
}
@@ -1486,7 +1487,7 @@ test "DWARF expressions" {
try b.writeConst(writer, u16, 1);
try b.writeConst(writer, u16, 0);
try b.writeOpcode(writer, e[0]);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, e[3]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[2]), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, e[1]), stack_machine.stack.pop().?.generic);
@@ -1497,7 +1498,7 @@ test "DWARF expressions" {
try b.writeLiteral(writer, 2);
try b.writeSkip(writer, 1);
try b.writeLiteral(writer, 3);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 2), stack_machine.stack.pop().?.generic);
stack_machine.reset();
@@ -1509,7 +1510,7 @@ test "DWARF expressions" {
try b.writeBra(writer, 1);
try b.writeLiteral(writer, 4);
try b.writeLiteral(writer, 5);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 5), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 4), stack_machine.stack.pop().?.generic);
try testing.expect(stack_machine.stack.pop() == null);
@@ -1535,7 +1536,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeConvert(writer, @as(usize, 0));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);
// Reinterpret to generic type
@@ -1543,7 +1544,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, @as(usize, 0));
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(value, stack_machine.stack.pop().?.generic);
// Reinterpret to new type
@@ -1553,7 +1554,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeConstType(writer, @as(usize, 0), &value_bytes);
try b.writeReinterpret(writer, die_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const const_type = stack_machine.stack.pop().?.const_type;
try testing.expectEqual(die_offset, const_type.type_offset);
@@ -1561,7 +1562,7 @@ test "DWARF expressions" {
program.clearRetainingCapacity();
try b.writeLiteral(writer, 0);
try b.writeReinterpret(writer, die_offset);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
const regval_type = stack_machine.stack.pop().?.regval_type;
try testing.expectEqual(die_offset, regval_type.type_offset);
}
@@ -1573,20 +1574,20 @@ test "DWARF expressions" {
stack_machine.reset();
program.clearRetainingCapacity();
try b.writeOpcode(writer, OP.nop);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expect(stack_machine.stack.pop() == null);
// Sub-expression
{
- var sub_program = std.array_list.Managed(u8).init(allocator);
+ var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
- const sub_writer = sub_program.writer();
+ const sub_writer = &sub_program.writer;
try b.writeLiteral(sub_writer, 3);
stack_machine.reset();
program.clearRetainingCapacity();
- try b.writeEntryValue(writer, sub_program.items);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ try b.writeEntryValue(writer, sub_program.written());
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 3), stack_machine.stack.pop().?.generic);
}
@@ -1605,15 +1606,15 @@ test "DWARF expressions" {
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
- var sub_program = std.array_list.Managed(u8).init(allocator);
+ var sub_program: std.Io.Writer.Allocating = .init(allocator);
defer sub_program.deinit();
- const sub_writer = sub_program.writer();
+ const sub_writer = &sub_program.writer;
try b.writeReg(sub_writer, 0);
stack_machine.reset();
program.clearRetainingCapacity();
- try b.writeEntryValue(writer, sub_program.items);
- _ = try stack_machine.run(program.items, allocator, context, null);
+ try b.writeEntryValue(writer, sub_program.written());
+ _ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xee), stack_machine.stack.pop().?.generic);
} else |err| {
switch (err) {
diff --git a/lib/std/debug/Pdb.zig b/lib/std/debug/Pdb.zig
index 4e46ab7bf6..008aad6ab6 100644
--- a/lib/std/debug/Pdb.zig
+++ b/lib/std/debug/Pdb.zig
@@ -2,10 +2,11 @@ const std = @import("../std.zig");
const File = std.fs.File;
const Allocator = std.mem.Allocator;
const pdb = std.pdb;
+const assert = std.debug.assert;
const Pdb = @This();
-in_file: File,
+file_reader: *File.Reader,
msf: Msf,
allocator: Allocator,
string_table: ?*MsfStream,
@@ -35,39 +36,38 @@ pub const Module = struct {
}
};
-pub fn init(allocator: Allocator, path: []const u8) !Pdb {
- const file = try std.fs.cwd().openFile(path, .{});
- errdefer file.close();
-
+pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
return .{
- .in_file = file,
- .allocator = allocator,
+ .file_reader = file_reader,
+ .allocator = gpa,
.string_table = null,
.dbi = null,
- .msf = try Msf.init(allocator, file),
- .modules = &[_]Module{},
- .sect_contribs = &[_]pdb.SectionContribEntry{},
+ .msf = try Msf.init(gpa, file_reader),
+ .modules = &.{},
+ .sect_contribs = &.{},
.guid = undefined,
.age = undefined,
};
}
pub fn deinit(self: *Pdb) void {
- self.in_file.close();
- self.msf.deinit(self.allocator);
+ const gpa = self.allocator;
+ self.msf.deinit(gpa);
for (self.modules) |*module| {
- module.deinit(self.allocator);
+ module.deinit(gpa);
}
- self.allocator.free(self.modules);
- self.allocator.free(self.sect_contribs);
+ gpa.free(self.modules);
+ gpa.free(self.sect_contribs);
}
pub fn parseDbiStream(self: *Pdb) !void {
var stream = self.getStream(pdb.StreamType.dbi) orelse
return error.InvalidDebugInfo;
- const reader = stream.reader();
- const header = try reader.readStruct(std.pdb.DbiStreamHeader);
+ const gpa = self.allocator;
+ const reader = &stream.interface;
+
+ const header = try reader.takeStruct(std.pdb.DbiStreamHeader, .little);
if (header.version_header != 19990903) // V70, only value observed by LLVM team
return error.UnknownPDBVersion;
// if (header.Age != age)
@@ -76,22 +76,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
const mod_info_size = header.mod_info_size;
const section_contrib_size = header.section_contribution_size;
- var modules = std.array_list.Managed(Module).init(self.allocator);
+ var modules = std.array_list.Managed(Module).init(gpa);
errdefer modules.deinit();
// Module Info Substream
var mod_info_offset: usize = 0;
while (mod_info_offset != mod_info_size) {
- const mod_info = try reader.readStruct(pdb.ModInfo);
+ const mod_info = try reader.takeStruct(pdb.ModInfo, .little);
var this_record_len: usize = @sizeOf(pdb.ModInfo);
- const module_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
- errdefer self.allocator.free(module_name);
- this_record_len += module_name.len + 1;
+ var module_name: std.Io.Writer.Allocating = .init(gpa);
+ defer module_name.deinit();
+ this_record_len += try reader.streamDelimiterLimit(&module_name.writer, 0, .limited(1024));
+ assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ reader.toss(1);
+ this_record_len += 1;
- const obj_file_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024);
- errdefer self.allocator.free(obj_file_name);
- this_record_len += obj_file_name.len + 1;
+ var obj_file_name: std.Io.Writer.Allocating = .init(gpa);
+ defer obj_file_name.deinit();
+ this_record_len += try reader.streamDelimiterLimit(&obj_file_name.writer, 0, .limited(1024));
+ assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ reader.toss(1);
+ this_record_len += 1;
if (this_record_len % 4 != 0) {
const round_to_next_4 = (this_record_len | 0x3) + 1;
@@ -100,10 +106,10 @@ pub fn parseDbiStream(self: *Pdb) !void {
this_record_len += march_forward_bytes;
}
- try modules.append(Module{
+ try modules.append(.{
.mod_info = mod_info,
- .module_name = module_name,
- .obj_file_name = obj_file_name,
+ .module_name = try module_name.toOwnedSlice(),
+ .obj_file_name = try obj_file_name.toOwnedSlice(),
.populated = false,
.symbols = undefined,
@@ -117,21 +123,21 @@ pub fn parseDbiStream(self: *Pdb) !void {
}
// Section Contribution Substream
- var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(self.allocator);
+ var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(gpa);
errdefer sect_contribs.deinit();
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
- const version = reader.readEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
- error.InvalidValue => return error.InvalidDebugInfo,
- else => |e| return e,
+ const version = reader.takeEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
+ error.InvalidEnumTag, error.EndOfStream => return error.InvalidDebugInfo,
+ error.ReadFailed => return error.ReadFailed,
};
_ = version;
sect_cont_offset += @sizeOf(u32);
}
while (sect_cont_offset != section_contrib_size) {
const entry = try sect_contribs.addOne();
- entry.* = try reader.readStruct(pdb.SectionContribEntry);
+ entry.* = try reader.takeStruct(pdb.SectionContribEntry, .little);
sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
if (sect_cont_offset > section_contrib_size)
@@ -143,29 +149,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
}
pub fn parseInfoStream(self: *Pdb) !void {
- var stream = self.getStream(pdb.StreamType.pdb) orelse
- return error.InvalidDebugInfo;
- const reader = stream.reader();
+ var stream = self.getStream(pdb.StreamType.pdb) orelse return error.InvalidDebugInfo;
+ const reader = &stream.interface;
// Parse the InfoStreamHeader.
- const version = try reader.readInt(u32, .little);
- const signature = try reader.readInt(u32, .little);
+ const version = try reader.takeInt(u32, .little);
+ const signature = try reader.takeInt(u32, .little);
_ = signature;
- const age = try reader.readInt(u32, .little);
- const guid = try reader.readBytesNoEof(16);
+ const age = try reader.takeInt(u32, .little);
+ const guid = try reader.takeArray(16);
if (version != 20000404) // VC70, only value observed by LLVM team
return error.UnknownPDBVersion;
- self.guid = guid;
+ self.guid = guid.*;
self.age = age;
+ const gpa = self.allocator;
+
// Find the string table.
const string_table_index = str_tab_index: {
- const name_bytes_len = try reader.readInt(u32, .little);
- const name_bytes = try self.allocator.alloc(u8, name_bytes_len);
- defer self.allocator.free(name_bytes);
- try reader.readNoEof(name_bytes);
+ const name_bytes_len = try reader.takeInt(u32, .little);
+ const name_bytes = try reader.readAlloc(gpa, name_bytes_len);
const HashTableHeader = extern struct {
size: u32,
@@ -175,23 +180,23 @@ pub fn parseInfoStream(self: *Pdb) !void {
return cap * 2 / 3 + 1;
}
};
- const hash_tbl_hdr = try reader.readStruct(HashTableHeader);
+ const hash_tbl_hdr = try reader.takeStruct(HashTableHeader, .little);
if (hash_tbl_hdr.capacity == 0)
return error.InvalidDebugInfo;
if (hash_tbl_hdr.size > HashTableHeader.maxLoad(hash_tbl_hdr.capacity))
return error.InvalidDebugInfo;
- const present = try readSparseBitVector(&reader, self.allocator);
- defer self.allocator.free(present);
+ const present = try readSparseBitVector(reader, gpa);
+ defer gpa.free(present);
if (present.len != hash_tbl_hdr.size)
return error.InvalidDebugInfo;
- const deleted = try readSparseBitVector(&reader, self.allocator);
- defer self.allocator.free(deleted);
+ const deleted = try readSparseBitVector(reader, gpa);
+ defer gpa.free(deleted);
for (present) |_| {
- const name_offset = try reader.readInt(u32, .little);
- const name_index = try reader.readInt(u32, .little);
+ const name_offset = try reader.takeInt(u32, .little);
+ const name_index = try reader.takeInt(u32, .little);
if (name_offset > name_bytes.len)
return error.InvalidDebugInfo;
const name = std.mem.sliceTo(name_bytes[name_offset..], 0);
@@ -233,6 +238,7 @@ pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.SourceLocation {
std.debug.assert(module.populated);
const subsect_info = module.subsect_info;
+ const gpa = self.allocator;
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -287,7 +293,16 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
const chksum_hdr: *align(1) pdb.FileChecksumEntryHeader = @ptrCast(&module.subsect_info[subsect_index]);
const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
try self.string_table.?.seekTo(strtab_offset);
- const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024);
+ const source_file_name = s: {
+ const string_reader = &self.string_table.?.interface;
+ var source_file_name: std.Io.Writer.Allocating = .init(gpa);
+ defer source_file_name.deinit();
+ _ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
+ assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+ string_reader.toss(1);
+ break :s try source_file_name.toOwnedSlice();
+ };
+ errdefer gpa.free(source_file_name);
const line_entry_idx = line_i - 1;
@@ -341,19 +356,16 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
const stream = self.getStreamById(mod.mod_info.module_sym_stream) orelse
return error.MissingDebugInfo;
- const reader = stream.reader();
+ const reader = &stream.interface;
- const signature = try reader.readInt(u32, .little);
+ const signature = try reader.takeInt(u32, .little);
if (signature != 4)
return error.InvalidDebugInfo;
- mod.symbols = try self.allocator.alloc(u8, mod.mod_info.sym_byte_size - 4);
- errdefer self.allocator.free(mod.symbols);
- try reader.readNoEof(mod.symbols);
+ const gpa = self.allocator;
- mod.subsect_info = try self.allocator.alloc(u8, mod.mod_info.c13_byte_size);
- errdefer self.allocator.free(mod.subsect_info);
- try reader.readNoEof(mod.subsect_info);
+ mod.symbols = try reader.readAlloc(gpa, mod.mod_info.sym_byte_size - 4);
+ mod.subsect_info = try reader.readAlloc(gpa, mod.mod_info.c13_byte_size);
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -379,8 +391,7 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
}
pub fn getStreamById(self: *Pdb, id: u32) ?*MsfStream {
- if (id >= self.msf.streams.len)
- return null;
+ if (id >= self.msf.streams.len) return null;
return &self.msf.streams[id];
}
@@ -394,17 +405,14 @@ const Msf = struct {
directory: MsfStream,
streams: []MsfStream,
- fn init(allocator: Allocator, file: File) !Msf {
- const in = file.deprecatedReader();
-
- const superblock = try in.readStruct(pdb.SuperBlock);
+ fn init(gpa: Allocator, file_reader: *File.Reader) !Msf {
+ const superblock = try file_reader.interface.takeStruct(pdb.SuperBlock, .little);
- // Sanity checks
if (!std.mem.eql(u8, &superblock.file_magic, pdb.SuperBlock.expect_magic))
return error.InvalidDebugInfo;
if (superblock.free_block_map_block != 1 and superblock.free_block_map_block != 2)
return error.InvalidDebugInfo;
- const file_len = try file.getEndPos();
+ const file_len = try file_reader.getSize();
if (superblock.num_blocks * superblock.block_size != file_len)
return error.InvalidDebugInfo;
switch (superblock.block_size) {
@@ -417,163 +425,182 @@ const Msf = struct {
if (dir_block_count > superblock.block_size / @sizeOf(u32))
return error.UnhandledBigDirectoryStream; // cf. BlockMapAddr comment.
- try file.seekTo(superblock.block_size * superblock.block_map_addr);
- const dir_blocks = try allocator.alloc(u32, dir_block_count);
+ try file_reader.seekTo(superblock.block_size * superblock.block_map_addr);
+ const dir_blocks = try gpa.alloc(u32, dir_block_count);
for (dir_blocks) |*b| {
- b.* = try in.readInt(u32, .little);
+ b.* = try file_reader.interface.takeInt(u32, .little);
}
- var directory = MsfStream.init(
- superblock.block_size,
- file,
- dir_blocks,
- );
+ var directory_buffer: [64]u8 = undefined;
+ var directory = MsfStream.init(superblock.block_size, file_reader, dir_blocks, &directory_buffer);
- const begin = directory.pos;
- const stream_count = try directory.reader().readInt(u32, .little);
- const stream_sizes = try allocator.alloc(u32, stream_count);
- defer allocator.free(stream_sizes);
+ const begin = directory.logicalPos();
+ const stream_count = try directory.interface.takeInt(u32, .little);
+ const stream_sizes = try gpa.alloc(u32, stream_count);
+ defer gpa.free(stream_sizes);
// Microsoft's implementation uses @as(u32, -1) for nonexistent streams.
// These streams are not used, but still participate in the file
// and must be taken into account when resolving stream indices.
- const Nil = 0xFFFFFFFF;
+ const nil_size = 0xFFFFFFFF;
for (stream_sizes) |*s| {
- const size = try directory.reader().readInt(u32, .little);
- s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.block_size);
+ const size = try directory.interface.takeInt(u32, .little);
+ s.* = if (size == nil_size) 0 else blockCountFromSize(size, superblock.block_size);
}
- const streams = try allocator.alloc(MsfStream, stream_count);
+ const streams = try gpa.alloc(MsfStream, stream_count);
+ errdefer gpa.free(streams);
+
for (streams, 0..) |*stream, i| {
const size = stream_sizes[i];
if (size == 0) {
- stream.* = MsfStream{
- .blocks = &[_]u32{},
- };
+ stream.* = .empty;
} else {
- var blocks = try allocator.alloc(u32, size);
- var j: u32 = 0;
- while (j < size) : (j += 1) {
- const block_id = try directory.reader().readInt(u32, .little);
+ const blocks = try gpa.alloc(u32, size);
+ errdefer gpa.free(blocks);
+ for (blocks) |*block| {
+ const block_id = try directory.interface.takeInt(u32, .little);
const n = (block_id % superblock.block_size);
// 0 is for pdb.SuperBlock, 1 and 2 for FPMs.
if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.block_size > file_len)
return error.InvalidBlockIndex;
- blocks[j] = block_id;
+ block.* = block_id;
}
-
- stream.* = MsfStream.init(
- superblock.block_size,
- file,
- blocks,
- );
+ const buffer = try gpa.alloc(u8, 64);
+ errdefer gpa.free(buffer);
+ stream.* = .init(superblock.block_size, file_reader, blocks, buffer);
}
}
- const end = directory.pos;
+ const end = directory.logicalPos();
if (end - begin != superblock.num_directory_bytes)
return error.InvalidStreamDirectory;
- return Msf{
+ return .{
.directory = directory,
.streams = streams,
};
}
- fn deinit(self: *Msf, allocator: Allocator) void {
- allocator.free(self.directory.blocks);
+ fn deinit(self: *Msf, gpa: Allocator) void {
+ gpa.free(self.directory.blocks);
for (self.streams) |*stream| {
- allocator.free(stream.blocks);
+ gpa.free(stream.interface.buffer);
+ gpa.free(stream.blocks);
}
- allocator.free(self.streams);
+ gpa.free(self.streams);
}
};
const MsfStream = struct {
- in_file: File = undefined,
- pos: u64 = undefined,
- blocks: []u32 = undefined,
- block_size: u32 = undefined,
-
- pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).@"fn".return_type.?).error_union.error_set;
+ file_reader: *File.Reader,
+ next_read_pos: u64,
+ blocks: []u32,
+ block_size: u32,
+ interface: std.Io.Reader,
+ err: ?Error,
+
+ const Error = File.Reader.SeekError;
+
+ const empty: MsfStream = .{
+ .file_reader = undefined,
+ .next_read_pos = 0,
+ .blocks = &.{},
+ .block_size = undefined,
+ .interface = .ending_instance,
+ .err = null,
+ };
- fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
- const stream = MsfStream{
- .in_file = file,
- .pos = 0,
+ fn init(block_size: u32, file_reader: *File.Reader, blocks: []u32, buffer: []u8) MsfStream {
+ return .{
+ .file_reader = file_reader,
+ .next_read_pos = 0,
.blocks = blocks,
.block_size = block_size,
+ .interface = .{
+ .vtable = &.{ .stream = stream },
+ .buffer = buffer,
+ .seek = 0,
+ .end = 0,
+ },
+ .err = null,
};
-
- return stream;
}
- fn read(self: *MsfStream, buffer: []u8) !usize {
- var block_id = @as(usize, @intCast(self.pos / self.block_size));
- if (block_id >= self.blocks.len) return 0; // End of Stream
- var block = self.blocks[block_id];
- var offset = self.pos % self.block_size;
+ fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
+ const ms: *MsfStream = @alignCast(@fieldParentPtr("interface", r));
- try self.in_file.seekTo(block * self.block_size + offset);
- const in = self.in_file.deprecatedReader();
+ var block_id: usize = @intCast(ms.next_read_pos / ms.block_size);
+ if (block_id >= ms.blocks.len) return error.EndOfStream;
+ var block = ms.blocks[block_id];
+ var offset = ms.next_read_pos % ms.block_size;
- var size: usize = 0;
- var rem_buffer = buffer;
- while (size < buffer.len) {
- const size_to_read = @min(self.block_size - offset, rem_buffer.len);
- size += try in.read(rem_buffer[0..size_to_read]);
- rem_buffer = buffer[size..];
- offset += size_to_read;
+ ms.file_reader.seekTo(block * ms.block_size + offset) catch |err| {
+ ms.err = err;
+ return error.ReadFailed;
+ };
+
+ var remaining = @intFromEnum(limit);
+ while (remaining != 0) {
+ const stream_len: usize = @min(remaining, ms.block_size - offset);
+ const n = try ms.file_reader.interface.stream(w, .limited(stream_len));
+ remaining -= n;
+ offset += n;
// If we're at the end of a block, go to the next one.
- if (offset == self.block_size) {
+ if (offset == ms.block_size) {
offset = 0;
block_id += 1;
- if (block_id >= self.blocks.len) break; // End of Stream
- block = self.blocks[block_id];
- try self.in_file.seekTo(block * self.block_size);
+ if (block_id >= ms.blocks.len) break; // End of Stream
+ block = ms.blocks[block_id];
+ ms.file_reader.seekTo(block * ms.block_size) catch |err| {
+ ms.err = err;
+ return error.ReadFailed;
+ };
}
}
- self.pos += buffer.len;
- return buffer.len;
+ const total = @intFromEnum(limit) - remaining;
+ ms.next_read_pos += total;
+ return total;
}
- pub fn seekBy(self: *MsfStream, len: i64) !void {
- self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
- if (self.pos >= self.blocks.len * self.block_size)
- return error.EOF;
+ pub fn logicalPos(ms: *const MsfStream) u64 {
+ return ms.next_read_pos - ms.interface.bufferedLen();
}
- pub fn seekTo(self: *MsfStream, len: u64) !void {
- self.pos = len;
- if (self.pos >= self.blocks.len * self.block_size)
- return error.EOF;
+ pub fn seekBy(ms: *MsfStream, len: i64) !void {
+ ms.next_read_pos = @as(u64, @intCast(@as(i64, @intCast(ms.logicalPos())) + len));
+ if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+ ms.interface.tossBuffered();
}
- fn getSize(self: *const MsfStream) u64 {
- return self.blocks.len * self.block_size;
+ pub fn seekTo(ms: *MsfStream, len: u64) !void {
+ ms.next_read_pos = len;
+ if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+ ms.interface.tossBuffered();
}
- fn getFilePos(self: MsfStream) u64 {
- const block_id = self.pos / self.block_size;
- const block = self.blocks[block_id];
- const offset = self.pos % self.block_size;
-
- return block * self.block_size + offset;
+ fn getSize(ms: *const MsfStream) u64 {
+ return ms.blocks.len * ms.block_size;
}
- pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
- return .{ .context = self };
+ fn getFilePos(ms: *const MsfStream) u64 {
+ const pos = ms.logicalPos();
+ const block_id = pos / ms.block_size;
+ const block = ms.blocks[block_id];
+ const offset = pos % ms.block_size;
+
+ return block * ms.block_size + offset;
}
};
-fn readSparseBitVector(stream: anytype, allocator: Allocator) ![]u32 {
- const num_words = try stream.readInt(u32, .little);
+fn readSparseBitVector(reader: *std.Io.Reader, allocator: Allocator) ![]u32 {
+ const num_words = try reader.takeInt(u32, .little);
var list = std.array_list.Managed(u32).init(allocator);
errdefer list.deinit();
var word_i: u32 = 0;
while (word_i != num_words) : (word_i += 1) {
- const word = try stream.readInt(u32, .little);
+ const word = try reader.takeInt(u32, .little);
var bit_i: u5 = 0;
while (true) : (bit_i += 1) {
if (word & (@as(u32, 1) << bit_i) != 0) {
diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig
index 693643c598..0a7a0f9a80 100644
--- a/lib/std/debug/SelfInfo.zig
+++ b/lib/std/debug/SelfInfo.zig
@@ -713,22 +713,26 @@ pub const Module = switch (native_os) {
},
.uefi, .windows => struct {
base_address: usize,
- pdb: ?Pdb = null,
- dwarf: ?Dwarf = null,
+ pdb: ?Pdb,
+ dwarf: ?Dwarf,
coff_image_base: u64,
/// Only used if pdb is non-null
coff_section_headers: []coff.SectionHeader,
- pub fn deinit(self: *@This(), allocator: Allocator) void {
+ pub fn deinit(self: *@This(), gpa: Allocator) void {
if (self.dwarf) |*dwarf| {
- dwarf.deinit(allocator);
+ dwarf.deinit(gpa);
}
if (self.pdb) |*p| {
+ gpa.free(p.file_reader.interface.buffer);
+ gpa.destroy(p.file_reader);
p.deinit();
- allocator.free(self.coff_section_headers);
+ gpa.free(self.coff_section_headers);
}
+
+ self.* = undefined;
}
fn getSymbolFromPdb(self: *@This(), relocated_address: usize) !?std.debug.Symbol {
@@ -970,23 +974,25 @@ fn readMachODebugInfo(allocator: Allocator, macho_file: File) !Module {
};
}
-fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
+fn readCoffDebugInfo(gpa: Allocator, coff_obj: *coff.Coff) !Module {
nosuspend {
var di: Module = .{
.base_address = undefined,
.coff_image_base = coff_obj.getImageBase(),
.coff_section_headers = undefined,
+ .pdb = null,
+ .dwarf = null,
};
if (coff_obj.getSectionByName(".debug_info")) |_| {
// This coff file has embedded DWARF debug info
var sections: Dwarf.SectionArray = Dwarf.null_section_array;
- errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);
+ errdefer for (sections) |section| if (section) |s| if (s.owned) gpa.free(s.data);
inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
sections[i] = if (coff_obj.getSectionByName("." ++ section.name)) |section_header| blk: {
break :blk .{
- .data = try coff_obj.getSectionDataAlloc(section_header, allocator),
+ .data = try coff_obj.getSectionDataAlloc(section_header, gpa),
.virtual_address = section_header.virtual_address,
.owned = true,
};
@@ -999,7 +1005,7 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
.is_macho = false,
};
- try Dwarf.open(&dwarf, allocator);
+ try Dwarf.open(&dwarf, gpa);
di.dwarf = dwarf;
}
@@ -1008,20 +1014,31 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
if (fs.path.isAbsolute(raw_path)) {
break :blk raw_path;
} else {
- const self_dir = try fs.selfExeDirPathAlloc(allocator);
- defer allocator.free(self_dir);
- break :blk try fs.path.join(allocator, &.{ self_dir, raw_path });
+ const self_dir = try fs.selfExeDirPathAlloc(gpa);
+ defer gpa.free(self_dir);
+ break :blk try fs.path.join(gpa, &.{ self_dir, raw_path });
}
};
- defer if (path.ptr != raw_path.ptr) allocator.free(path);
+ defer if (path.ptr != raw_path.ptr) gpa.free(path);
- di.pdb = Pdb.init(allocator, path) catch |err| switch (err) {
+ const pdb_file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
error.FileNotFound, error.IsDir => {
if (di.dwarf == null) return error.MissingDebugInfo;
return di;
},
- else => return err,
+ else => |e| return e,
};
+ errdefer pdb_file.close();
+
+ const pdb_file_reader_buffer = try gpa.alloc(u8, 4096);
+ errdefer gpa.free(pdb_file_reader_buffer);
+
+ const pdb_file_reader = try gpa.create(File.Reader);
+ errdefer gpa.destroy(pdb_file_reader);
+
+ pdb_file_reader.* = pdb_file.reader(pdb_file_reader_buffer);
+
+ di.pdb = try Pdb.init(gpa, pdb_file_reader);
try di.pdb.?.parseInfoStream();
try di.pdb.?.parseDbiStream();
@@ -1029,8 +1046,8 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
return error.InvalidDebugInfo;
// Only used by the pdb path
- di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(allocator);
- errdefer allocator.free(di.coff_section_headers);
+ di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(gpa);
+ errdefer gpa.free(di.coff_section_headers);
return di;
}
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
index 8da2112f8b..99825a87d1 100644
--- a/lib/std/fs/File.zig
+++ b/lib/std/fs/File.zig
@@ -1097,14 +1097,6 @@ pub fn deprecatedReader(file: File) DeprecatedReader {
return .{ .context = file };
}
-/// Deprecated in favor of `Writer`.
-pub const DeprecatedWriter = io.GenericWriter(File, WriteError, write);
-
-/// Deprecated in favor of `Writer`.
-pub fn deprecatedWriter(file: File) DeprecatedWriter {
- return .{ .context = file };
-}
-
/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 52388f38ea..e730cff89b 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -6,7 +6,7 @@
//! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
//! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
//!
-//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.GenericWriter`.
+//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.Io.Writer`.
//! The high-level `stringify` serializes a Zig or `Value` type into JSON.
const builtin = @import("builtin");
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index 0de97011fd..d3dd231e3d 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -33,28 +33,6 @@ pub fn readUleb128(comptime T: type, reader: anytype) !T {
return @as(T, @truncate(value));
}
-/// Write a single unsigned integer as unsigned LEB128 to the given writer.
-pub fn writeUleb128(writer: anytype, arg: anytype) !void {
- const Arg = @TypeOf(arg);
- const Int = switch (Arg) {
- comptime_int => std.math.IntFittingRange(arg, arg),
- else => Arg,
- };
- const Value = if (@typeInfo(Int).int.bits < 8) u8 else Int;
- var value: Value = arg;
-
- while (true) {
- const byte: u8 = @truncate(value & 0x7f);
- value >>= 7;
- if (value == 0) {
- try writer.writeByte(byte);
- break;
- } else {
- try writer.writeByte(byte | 0x80);
- }
- }
-}
-
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readIleb128(comptime T: type, reader: anytype) !T {
@@ -374,84 +352,3 @@ test "deserialize unsigned LEB128" {
// Decode sequence of ULEB128 values
try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}
-
-fn test_write_leb128(value: anytype) !void {
- const T = @TypeOf(value);
- const signedness = @typeInfo(T).int.signedness;
- const t_signed = signedness == .signed;
-
- const writeStream = if (t_signed) writeIleb128 else writeUleb128;
- const readStream = if (t_signed) readIleb128 else readUleb128;
-
- // decode to a larger bit size too, to ensure sign extension
- // is working as expected
- const larger_type_bits = ((@typeInfo(T).int.bits + 8) / 8) * 8;
- const B = std.meta.Int(signedness, larger_type_bits);
-
- const bytes_needed = bn: {
- if (@typeInfo(T).int.bits <= 7) break :bn @as(u16, 1);
-
- const unused_bits = if (value < 0) @clz(~value) else @clz(value);
- const used_bits: u16 = (@typeInfo(T).int.bits - unused_bits) + @intFromBool(t_signed);
- if (used_bits <= 7) break :bn @as(u16, 1);
- break :bn ((used_bits + 6) / 7);
- };
-
- const max_groups = if (@typeInfo(T).int.bits == 0) 1 else (@typeInfo(T).int.bits + 6) / 7;
-
- var buf: [max_groups]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&buf);
-
- // stream write
- try writeStream(fbs.writer(), value);
- const w1_pos = fbs.pos;
- try testing.expect(w1_pos == bytes_needed);
-
- // stream read
- fbs.pos = 0;
- const sr = try readStream(T, fbs.reader());
- try testing.expect(fbs.pos == w1_pos);
- try testing.expect(sr == value);
-
- // bigger type stream read
- fbs.pos = 0;
- const bsr = try readStream(B, fbs.reader());
- try testing.expect(fbs.pos == w1_pos);
- try testing.expect(bsr == value);
-}
-
-test "serialize unsigned LEB128" {
- if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
-
- const max_bits = 18;
-
- comptime var t = 0;
- inline while (t <= max_bits) : (t += 1) {
- const T = std.meta.Int(.unsigned, t);
- const min = std.math.minInt(T);
- const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(.unsigned, @typeInfo(T).int.bits + 1), min);
-
- while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
- }
-}
-
-test "serialize signed LEB128" {
- if (builtin.cpu.arch == .x86 and builtin.abi == .musl and builtin.link_mode == .dynamic) return error.SkipZigTest;
-
- // explicitly test i0 because starting `t` at 0
- // will break the while loop
- try test_write_leb128(@as(i0, 0));
-
- const max_bits = 18;
-
- comptime var t = 1;
- inline while (t <= max_bits) : (t += 1) {
- const T = std.meta.Int(.signed, t);
- const min = std.math.minInt(T);
- const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(.signed, @typeInfo(T).int.bits + 1), min);
-
- while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
- }
-}
diff --git a/lib/std/macho.zig b/lib/std/macho.zig
index 75aa91e536..a1d23d8b18 100644
--- a/lib/std/macho.zig
+++ b/lib/std/macho.zig
@@ -1883,10 +1883,8 @@ pub const GenericBlob = extern struct {
pub const data_in_code_entry = extern struct {
/// From mach_header to start of data range.
offset: u32,
-
/// Number of bytes in data range.
length: u16,
-
/// A DICE_KIND value.
kind: u16,
};
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig
index 55a53518d9..6c18999d4b 100644
--- a/lib/std/posix/test.zig
+++ b/lib/std/posix/test.zig
@@ -683,11 +683,11 @@ test "mmap" {
const file = try tmp.dir.createFile(test_out_file, .{});
defer file.close();
- const stream = file.deprecatedWriter();
+ var stream = file.writer(&.{});
var i: u32 = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
- try stream.writeInt(u32, i, .little);
+ try stream.interface.writeInt(u32, i, .little);
}
}
diff --git a/lib/std/tz.zig b/lib/std/tz.zig
index bff0101439..32b23ddaef 100644
--- a/lib/std/tz.zig
+++ b/lib/std/tz.zig
@@ -1,6 +1,12 @@
-const std = @import("std.zig");
+//! The Time Zone Information Format (TZif)
+//! https://datatracker.ietf.org/doc/html/rfc8536
+
const builtin = @import("builtin");
+const std = @import("std.zig");
+const Reader = std.Io.Reader;
+const Allocator = std.mem.Allocator;
+
pub const Transition = struct {
ts: i64,
timetype: *Timetype,
@@ -34,7 +40,7 @@ pub const Leapsecond = struct {
};
pub const Tz = struct {
- allocator: std.mem.Allocator,
+ allocator: Allocator,
transitions: []const Transition,
timetypes: []const Timetype,
leapseconds: []const Leapsecond,
@@ -54,34 +60,30 @@ pub const Tz = struct {
},
};
- pub fn parse(allocator: std.mem.Allocator, reader: anytype) !Tz {
- var legacy_header = try reader.readStruct(Header);
+ pub fn parse(allocator: Allocator, reader: *Reader) !Tz {
+ const legacy_header = try reader.takeStruct(Header, .big);
if (!std.mem.eql(u8, &legacy_header.magic, "TZif")) return error.BadHeader;
- if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3') return error.BadVersion;
-
- if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
- std.mem.byteSwapAllFields(@TypeOf(legacy_header.counts), &legacy_header.counts);
- }
+ if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3')
+ return error.BadVersion;
- if (legacy_header.version == 0) {
+ if (legacy_header.version == 0)
return parseBlock(allocator, reader, legacy_header, true);
- } else {
- // If the format is modern, just skip over the legacy data
- const skipv = legacy_header.counts.timecnt * 5 + legacy_header.counts.typecnt * 6 + legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 + legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
- try reader.skipBytes(skipv, .{});
-
- var header = try reader.readStruct(Header);
- if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
- if (header.version != '2' and header.version != '3') return error.BadVersion;
- if (builtin.target.cpu.arch.endian() != std.builtin.Endian.big) {
- std.mem.byteSwapAllFields(@TypeOf(header.counts), &header.counts);
- }
- return parseBlock(allocator, reader, header, false);
- }
+ // If the format is modern, just skip over the legacy data
+ const skip_n = legacy_header.counts.timecnt * 5 +
+ legacy_header.counts.typecnt * 6 +
+ legacy_header.counts.charcnt + legacy_header.counts.leapcnt * 8 +
+ legacy_header.counts.isstdcnt + legacy_header.counts.isutcnt;
+ try reader.discardAll(skip_n);
+
+ var header = try reader.takeStruct(Header, .big);
+ if (!std.mem.eql(u8, &header.magic, "TZif")) return error.BadHeader;
+ if (header.version != '2' and header.version != '3') return error.BadVersion;
+
+ return parseBlock(allocator, reader, header, false);
}
- fn parseBlock(allocator: std.mem.Allocator, reader: anytype, header: Header, legacy: bool) !Tz {
+ fn parseBlock(allocator: Allocator, reader: *Reader, header: Header, legacy: bool) !Tz {
if (header.counts.isstdcnt != 0 and header.counts.isstdcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isstdcnt [...] MUST either be zero or equal to "typecnt"
if (header.counts.isutcnt != 0 and header.counts.isutcnt != header.counts.typecnt) return error.Malformed; // rfc8536: isutcnt [...] MUST either be zero or equal to "typecnt"
if (header.counts.typecnt == 0) return error.Malformed; // rfc8536: typecnt [...] MUST NOT be zero
@@ -98,12 +100,12 @@ pub const Tz = struct {
// Parse transition types
var i: usize = 0;
while (i < header.counts.timecnt) : (i += 1) {
- transitions[i].ts = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
+ transitions[i].ts = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
}
i = 0;
while (i < header.counts.timecnt) : (i += 1) {
- const tt = try reader.readByte();
+ const tt = try reader.takeByte();
if (tt >= timetypes.len) return error.Malformed; // rfc8536: Each type index MUST be in the range [0, "typecnt" - 1]
transitions[i].timetype = &timetypes[tt];
}
@@ -111,11 +113,11 @@ pub const Tz = struct {
// Parse time types
i = 0;
while (i < header.counts.typecnt) : (i += 1) {
- const offset = try reader.readInt(i32, .big);
+ const offset = try reader.takeInt(i32, .big);
if (offset < -2147483648) return error.Malformed; // rfc8536: utoff [...] MUST NOT be -2**31
- const dst = try reader.readByte();
+ const dst = try reader.takeByte();
if (dst != 0 and dst != 1) return error.Malformed; // rfc8536: (is)dst [...] The value MUST be 0 or 1.
- const idx = try reader.readByte();
+ const idx = try reader.takeByte();
if (idx > header.counts.charcnt - 1) return error.Malformed; // rfc8536: (desig)idx [...] Each index MUST be in the range [0, "charcnt" - 1]
timetypes[i] = .{
.offset = offset,
@@ -128,7 +130,7 @@ pub const Tz = struct {
}
var designators_data: [256 + 6]u8 = undefined;
- try reader.readNoEof(designators_data[0..header.counts.charcnt]);
+ try reader.readSliceAll(designators_data[0..header.counts.charcnt]);
const designators = designators_data[0..header.counts.charcnt];
if (designators[designators.len - 1] != 0) return error.Malformed; // rfc8536: charcnt [...] includes the trailing NUL (0x00) octet
@@ -144,12 +146,12 @@ pub const Tz = struct {
// Parse leap seconds
i = 0;
while (i < header.counts.leapcnt) : (i += 1) {
- const occur: i64 = if (legacy) try reader.readInt(i32, .big) else try reader.readInt(i64, .big);
+ const occur: i64 = if (legacy) try reader.takeInt(i32, .big) else try reader.takeInt(i64, .big);
if (occur < 0) return error.Malformed; // rfc8536: occur [...] MUST be nonnegative
if (i > 0 and leapseconds[i - 1].occurrence + 2419199 > occur) return error.Malformed; // rfc8536: occur [...] each later value MUST be at least 2419199 greater than the previous value
if (occur > std.math.maxInt(i48)) return error.Malformed; // Unreasonably far into the future
- const corr = try reader.readInt(i32, .big);
+ const corr = try reader.takeInt(i32, .big);
if (i == 0 and corr != -1 and corr != 1) return error.Malformed; // rfc8536: The correction value in the first leap-second record, if present, MUST be either one (1) or minus one (-1)
if (i > 0 and leapseconds[i - 1].correction != corr + 1 and leapseconds[i - 1].correction != corr - 1) return error.Malformed; // rfc8536: The correction values in adjacent leap-second records MUST differ by exactly one (1)
if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction
@@ -163,7 +165,7 @@ pub const Tz = struct {
// Parse standard/wall indicators
i = 0;
while (i < header.counts.isstdcnt) : (i += 1) {
- const stdtime = try reader.readByte();
+ const stdtime = try reader.takeByte();
if (stdtime == 1) {
timetypes[i].flags |= 0x02;
}
@@ -172,7 +174,7 @@ pub const Tz = struct {
// Parse UT/local indicators
i = 0;
while (i < header.counts.isutcnt) : (i += 1) {
- const ut = try reader.readByte();
+ const ut = try reader.takeByte();
if (ut == 1) {
timetypes[i].flags |= 0x04;
if (!timetypes[i].standardTimeIndicator()) return error.Malformed; // rfc8536: standard/wall value MUST be one (1) if the UT/local value is one (1)
@@ -182,9 +184,8 @@ pub const Tz = struct {
// Footer
var footer: ?[]u8 = null;
if (!legacy) {
- if ((try reader.readByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
- var footerdata_buf: [128]u8 = undefined;
- const footer_mem = reader.readUntilDelimiter(&footerdata_buf, '\n') catch |err| switch (err) {
+ if ((try reader.takeByte()) != '\n') return error.Malformed; // An rfc8536 footer must start with a newline
+ const footer_mem = reader.takeSentinel('\n') catch |err| switch (err) {
error.StreamTooLong => return error.OverlargeFooter, // Read more than 128 bytes, much larger than any reasonable POSIX TZ string
else => return err,
};
@@ -194,7 +195,7 @@ pub const Tz = struct {
}
errdefer if (footer) |ft| allocator.free(ft);
- return Tz{
+ return .{
.allocator = allocator,
.transitions = transitions,
.timetypes = timetypes,
@@ -215,9 +216,9 @@ pub const Tz = struct {
test "slim" {
const data = @embedFile("tz/asia_tokyo.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 9);
@@ -228,9 +229,9 @@ test "slim" {
test "fat" {
const data = @embedFile("tz/antarctica_davis.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 8);
@@ -241,9 +242,9 @@ test "fat" {
test "legacy" {
// Taken from Slackware 8.0, from 2001
const data = @embedFile("tz/europe_vatican.tzif");
- var in_stream = std.io.fixedBufferStream(data);
+ var in_stream: Reader = .fixed(data);
- var tz = try std.Tz.parse(std.testing.allocator, in_stream.reader());
+ var tz = try std.Tz.parse(std.testing.allocator, &in_stream);
defer tz.deinit();
try std.testing.expectEqual(tz.transitions.len, 170);