author     Bas <BarabasGitHub@users.noreply.github.com>  2020-09-08 11:56:59 +0200
committer  GitHub <noreply@github.com>                   2020-09-08 11:56:59 +0200
commit     4a6ca735d9b3d466aba37c4488c1235b06a0bc84 (patch)
tree       10ef029ccaefe15c5152c1512a952ca6fdb01358 /lib/std
parent     0a40a61548ad9f666ed5300a8910f9040cc1390b (diff)
parent     389c26025283edef2206d19d9ad1ddc41e98f007 (diff)
Merge branch 'master' into improve-windows-networking
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/array_list.zig | 12
-rw-r--r--  lib/std/c.zig | 5
-rw-r--r--  lib/std/child_process.zig | 12
-rw-r--r--  lib/std/coff.zig | 66
-rw-r--r--  lib/std/compress.zig | 13
-rw-r--r--  lib/std/compress/deflate.zig | 521
-rw-r--r--  lib/std/compress/rfc1951.txt | 955
-rw-r--r--  lib/std/compress/rfc1951.txt.fixed.z.9 | Bin 0 -> 12836 bytes
-rw-r--r--  lib/std/compress/rfc1951.txt.z.0 | Bin 0 -> 36960 bytes
-rw-r--r--  lib/std/compress/rfc1951.txt.z.9 | Bin 0 -> 11111 bytes
-rw-r--r--  lib/std/compress/zlib.zig | 178
-rw-r--r--  lib/std/debug/leb128.zig | 47
-rw-r--r--  lib/std/fmt.zig | 40
-rw-r--r--  lib/std/fmt/parse_float.zig | 7
-rw-r--r--  lib/std/fs.zig | 12
-rw-r--r--  lib/std/fs/file.zig | 40
-rw-r--r--  lib/std/fs/test.zig | 10
-rw-r--r--  lib/std/hash/auto_hash.zig | 2
-rw-r--r--  lib/std/heap.zig | 6
-rw-r--r--  lib/std/io.zig | 9
-rw-r--r--  lib/std/io/auto_indenting_stream.zig | 148
-rw-r--r--  lib/std/io/change_detection_stream.zig | 55
-rw-r--r--  lib/std/io/find_byte_out_stream.zig | 40
-rw-r--r--  lib/std/io/reader.zig | 10
-rw-r--r--  lib/std/io/serialization.zig | 6
-rw-r--r--  lib/std/io/writer.zig | 10
-rw-r--r--  lib/std/log.zig | 4
-rw-r--r--  lib/std/math.zig | 63
-rw-r--r--  lib/std/math/big.zig | 11
-rw-r--r--  lib/std/math/big/int.zig | 87
-rw-r--r--  lib/std/math/big/int_test.zig | 6
-rw-r--r--  lib/std/math/big/rational.zig | 16
-rw-r--r--  lib/std/math/cos.zig | 2
-rw-r--r--  lib/std/math/pow.zig | 4
-rw-r--r--  lib/std/math/sin.zig | 2
-rw-r--r--  lib/std/math/sqrt.zig | 6
-rw-r--r--  lib/std/math/tan.zig | 2
-rw-r--r--  lib/std/mem.zig | 42
-rw-r--r--  lib/std/mem/Allocator.zig | 8
-rw-r--r--  lib/std/meta.zig | 16
-rw-r--r--  lib/std/net.zig | 5
-rw-r--r--  lib/std/os.zig | 136
-rw-r--r--  lib/std/os/bits/darwin.zig | 8
-rw-r--r--  lib/std/os/bits/dragonfly.zig | 13
-rw-r--r--  lib/std/os/bits/freebsd.zig | 8
-rw-r--r--  lib/std/os/bits/linux.zig | 10
-rw-r--r--  lib/std/os/bits/linux/x86_64.zig | 5
-rw-r--r--  lib/std/os/linux.zig | 90
-rw-r--r--  lib/std/os/linux/bpf.zig | 826
-rw-r--r--  lib/std/os/test.zig | 36
-rw-r--r--  lib/std/os/windows/kernel32.zig | 2
-rw-r--r--  lib/std/os/windows/ws2_32.zig | 2
-rw-r--r--  lib/std/pdb.zig | 2
-rw-r--r--  lib/std/process.zig | 8
-rw-r--r--  lib/std/progress.zig | 6
-rw-r--r--  lib/std/rand.zig | 57
-rw-r--r--  lib/std/special/build_runner.zig | 2
-rw-r--r--  lib/std/special/c.zig | 5
-rw-r--r--  lib/std/special/compiler_rt/addXf3.zig | 18
-rw-r--r--  lib/std/special/compiler_rt/aulldiv.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/aullrem.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/compareXf2.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/divdf3.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/divsf3.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/divtf3.zig | 5
-rw-r--r--  lib/std/special/compiler_rt/divti3.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/fixint.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/fixuint.zig | 6
-rw-r--r--  lib/std/special/compiler_rt/floatXisf.zig | 9
-rw-r--r--  lib/std/special/compiler_rt/floatsiXf.zig | 7
-rw-r--r--  lib/std/special/compiler_rt/floatundisf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/floatunditf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/floatunsitf.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/int.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/modti3.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/mulXf3.zig | 10
-rw-r--r--  lib/std/special/compiler_rt/mulodi4.zig | 2
-rw-r--r--  lib/std/special/compiler_rt/muloti4.zig | 6
-rw-r--r--  lib/std/special/compiler_rt/negXf2.zig | 3
-rw-r--r--  lib/std/special/compiler_rt/shift.zig | 25
-rw-r--r--  lib/std/special/compiler_rt/truncXfYf2.zig | 4
-rw-r--r--  lib/std/special/compiler_rt/udivmod.zig | 70
-rw-r--r--  lib/std/special/test_runner.zig | 2
-rw-r--r--  lib/std/start.zig | 4
-rw-r--r--  lib/std/std.zig | 1
-rw-r--r--  lib/std/target.zig | 60
-rw-r--r--  lib/std/thread.zig | 6
-rw-r--r--  lib/std/zig.zig | 2
-rw-r--r--  lib/std/zig/parser_test.zig | 120
-rw-r--r--  lib/std/zig/render.zig | 1675
-rw-r--r--  lib/std/zig/tokenizer.zig | 3
91 files changed, 4374 insertions, 1414 deletions
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index a7432a30ae..f298d14631 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -46,7 +46,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self.init(allocator);
- try self.ensureCapacity(num);
+
+ const new_memory = try self.allocator.allocAdvanced(T, alignment, num, .at_least);
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
+
return self;
}
@@ -366,7 +370,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self{};
- try self.ensureCapacity(allocator, num);
+
+ const new_memory = try allocator.allocAdvanced(T, alignment, num, .at_least);
+ self.items.ptr = new_memory.ptr;
+ self.capacity = new_memory.len;
+
return self;
}
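A minimal usage sketch for the pre-allocating constructor; the test name and the `appendAssumeCapacity` loop are illustrative and not part of this commit:

    const std = @import("std");

    test "ArrayList.initCapacity preallocates" {
        // Ask for room for 64 elements up front; allocAdvanced(.., .at_least)
        // may return even more capacity than requested.
        var list = try std.ArrayList(u8).initCapacity(std.testing.allocator, 64);
        defer list.deinit();
        std.testing.expect(list.capacity >= 64);

        // The first 64 appends need no further allocation.
        var i: u8 = 0;
        while (i < 64) : (i += 1) {
            list.appendAssumeCapacity(i);
        }
        std.testing.expectEqual(@as(usize, 64), list.items.len);
    }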
diff --git a/lib/std/c.zig b/lib/std/c.zig
index b4e5fc7392..1b3f403ab5 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -330,3 +330,8 @@ pub const FILE = @Type(.Opaque);
pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void;
pub extern "c" fn dlclose(handle: *c_void) c_int;
pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void;
+
+pub extern "c" fn sync() void;
+pub extern "c" fn syncfs(fd: c_int) c_int;
+pub extern "c" fn fsync(fd: c_int) c_int;
+pub extern "c" fn fdatasync(fd: c_int) c_int;
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 287fc3e7cd..9219b05088 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -44,10 +44,10 @@ pub const ChildProcess = struct {
stderr_behavior: StdIo,
/// Set to change the user id when spawning the child process.
- uid: if (builtin.os.tag == .windows) void else ?u32,
+ uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t,
/// Set to change the group id when spawning the child process.
- gid: if (builtin.os.tag == .windows) void else ?u32,
+ gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t,
/// Set to change the current working directory when spawning the child process.
cwd: ?[]const u8,
@@ -275,9 +275,7 @@ pub const ChildProcess = struct {
}
fn handleWaitResult(self: *ChildProcess, status: u32) void {
- // TODO https://github.com/ziglang/zig/issues/3190
- var term = self.cleanupAfterWait(status);
- self.term = term;
+ self.term = self.cleanupAfterWait(status);
}
fn cleanupStreams(self: *ChildProcess) void {
@@ -487,8 +485,8 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore)
- windows.OpenFile(&[_]u16{ 'N', 'U', 'L' }, .{
- .dir = std.fs.cwd().fd,
+ // "\Device\Null" or "\??\NUL"
+ windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.share_access = windows.FILE_SHARE_READ,
.creation = windows.OPEN_EXISTING,
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index cd567b3a6e..ea3a232187 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c;
const IMAGE_FILE_MACHINE_IA64 = 0x0200;
const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
+pub const MachineType = enum(u16) {
+ Unknown = 0x0,
+ /// Matsushita AM33
+ AM33 = 0x1d3,
+ /// x64
+ X64 = 0x8664,
+ /// ARM little endian
+ ARM = 0x1c0,
+ /// ARM64 little endian
+ ARM64 = 0xaa64,
+ /// ARM Thumb-2 little endian
+ ARMNT = 0x1c4,
+ /// EFI byte code
+ EBC = 0xebc,
+ /// Intel 386 or later processors and compatible processors
+ I386 = 0x14c,
+ /// Intel Itanium processor family
+ IA64 = 0x200,
+ /// Mitsubishi M32R little endian
+ M32R = 0x9041,
+ /// MIPS16
+ MIPS16 = 0x266,
+ /// MIPS with FPU
+ MIPSFPU = 0x366,
+ /// MIPS16 with FPU
+ MIPSFPU16 = 0x466,
+ /// Power PC little endian
+ POWERPC = 0x1f0,
+ /// Power PC with floating point support
+ POWERPCFP = 0x1f1,
+ /// MIPS little endian
+ R4000 = 0x166,
+ /// RISC-V 32-bit address space
+ RISCV32 = 0x5032,
+ /// RISC-V 64-bit address space
+ RISCV64 = 0x5064,
+ /// RISC-V 128-bit address space
+ RISCV128 = 0x5128,
+ /// Hitachi SH3
+ SH3 = 0x1a2,
+ /// Hitachi SH3 DSP
+ SH3DSP = 0x1a3,
+ /// Hitachi SH4
+ SH4 = 0x1a6,
+ /// Hitachi SH5
+ SH5 = 0x1a8,
+ /// Thumb
+ Thumb = 0x1c2,
+ /// MIPS little-endian WCE v2
+ WCEMIPSV2 = 0x169,
+};
+
// OptionalHeader.magic values
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
+// Image Characteristics
+pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
+pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
+pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
+pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
+pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
+
+// Section flags
+pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
+pub const IMAGE_SCN_MEM_READ = 0x40000000;
+pub const IMAGE_SCN_CNT_CODE = 0x20;
+pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
+pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
+
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
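A short sketch of how the new machine-type enum and characteristic flags might be used when inspecting a COFF header; the raw values below are hard-coded for illustration rather than read from a real file:

    const std = @import("std");
    const coff = std.coff;

    test "coff machine type and characteristics" {
        // 0x8664 is the machine value for x86-64 images.
        const machine = @intToEnum(coff.MachineType, 0x8664);
        std.testing.expectEqual(coff.MachineType.X64, machine);

        // Characteristics form a bit mask; combine and test the new flags.
        const characteristics: u16 =
            coff.IMAGE_FILE_EXECUTABLE_IMAGE | coff.IMAGE_FILE_LARGE_ADDRESS_AWARE;
        std.testing.expect((characteristics & coff.IMAGE_FILE_RELOCS_STRIPPED) == 0);
    }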
diff --git a/lib/std/compress.zig b/lib/std/compress.zig
new file mode 100644
index 0000000000..5518f807df
--- /dev/null
+++ b/lib/std/compress.zig
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+const std = @import("std.zig");
+
+pub const deflate = @import("compress/deflate.zig");
+pub const zlib = @import("compress/zlib.zig");
+
+test "" {
+ _ = zlib;
+}
diff --git a/lib/std/compress/deflate.zig b/lib/std/compress/deflate.zig
new file mode 100644
index 0000000000..bad23349e8
--- /dev/null
+++ b/lib/std/compress/deflate.zig
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+//
+// Decompressor for DEFLATE data streams (RFC1951)
+//
+// Heavily inspired by the simple decompressor puff.c by Mark Adler
+
+const std = @import("std");
+const io = std.io;
+const math = std.math;
+const mem = std.mem;
+
+const assert = std.debug.assert;
+
+const MAXBITS = 15;
+const MAXLCODES = 286;
+const MAXDCODES = 30;
+const MAXCODES = MAXLCODES + MAXDCODES;
+const FIXLCODES = 288;
+
+const Huffman = struct {
+ count: [MAXBITS + 1]u16,
+ symbol: [MAXCODES]u16,
+
+ fn construct(self: *Huffman, length: []const u16) !void {
+ for (self.count) |*val| {
+ val.* = 0;
+ }
+
+ for (length) |val| {
+ self.count[val] += 1;
+ }
+
+ if (self.count[0] == length.len)
+ return;
+
+ var left: isize = 1;
+ for (self.count[1..]) |val| {
+ left *= 2;
+ left -= @as(isize, @bitCast(i16, val));
+ if (left < 0)
+ return error.InvalidTree;
+ }
+
+ var offs: [MAXBITS + 1]u16 = undefined;
+ {
+ var len: usize = 1;
+ offs[1] = 0;
+ while (len < MAXBITS) : (len += 1) {
+ offs[len + 1] = offs[len] + self.count[len];
+ }
+ }
+
+ for (length) |val, symbol| {
+ if (val != 0) {
+ self.symbol[offs[val]] = @truncate(u16, symbol);
+ offs[val] += 1;
+ }
+ }
+ }
+};
+
+pub fn InflateStream(comptime ReaderType: type) type {
+ return struct {
+ const Self = @This();
+
+ pub const Error = ReaderType.Error || error{
+ EndOfStream,
+ BadCounts,
+ InvalidBlockType,
+ InvalidDistance,
+ InvalidFixedCode,
+ InvalidLength,
+ InvalidStoredSize,
+ InvalidSymbol,
+ InvalidTree,
+ MissingEOBCode,
+ NoLastLength,
+ OutOfCodes,
+ };
+ pub const Reader = io.Reader(*Self, Error, read);
+
+ bit_reader: io.BitReader(.Little, ReaderType),
+
+ // True if the decoder has reached the end of the compressed stream; no
+ // further data can be decompressed
+ seen_eos: bool,
+
+ state: union(enum) {
+ // Parse a compressed block header and set up the internal state for
+ // decompressing its contents.
+ DecodeBlockHeader: void,
+ // Decode all the symbols in a compressed block.
+ DecodeBlockData: void,
+ // Copy N bytes of uncompressed data from the underlying stream into
+ // the window.
+ Copy: usize,
+ // Copy 1 byte into the window.
+ CopyLit: u8,
+ // Copy L bytes from the window itself, starting from D bytes
+ // behind.
+ CopyFrom: struct { distance: u16, length: u16 },
+ },
+
+ // Sliding window for the LZ77 algorithm
+ window: struct {
+ const WSelf = @This();
+
+ // invariant: buffer length is always a power of 2
+ buf: []u8,
+ // invariant: ri <= wi
+ wi: usize = 0, // Write index
+ ri: usize = 0, // Read index
+ el: usize = 0, // Number of readable elements
+
+ fn readable(self: *WSelf) usize {
+ return self.el;
+ }
+
+ fn writable(self: *WSelf) usize {
+ return self.buf.len - self.el;
+ }
+
+ // Insert a single byte into the window.
+ // Returns 1 if there's enough space for the new byte and 0
+ // otherwise.
+ fn append(self: *WSelf, value: u8) usize {
+ if (self.writable() < 1) return 0;
+ self.appendUnsafe(value);
+ return 1;
+ }
+
+ // Insert a single byte into the window.
+ // Assumes there's enough space.
+ fn appendUnsafe(self: *WSelf, value: u8) void {
+ self.buf[self.wi] = value;
+ self.wi = (self.wi + 1) & (self.buf.len - 1);
+ self.el += 1;
+ }
+
+ // Fill dest[] with data from the window, starting from the read
+ // position. This updates the read pointer.
+ // Returns the number of read bytes or 0 if there's nothing to read
+ // yet.
+ fn read(self: *WSelf, dest: []u8) usize {
+ const N = math.min(dest.len, self.readable());
+
+ if (N == 0) return 0;
+
+ if (self.ri + N < self.buf.len) {
+ // The data doesn't wrap around
+ mem.copy(u8, dest, self.buf[self.ri .. self.ri + N]);
+ } else {
+ // The data wraps around the buffer, split the copy
+ std.mem.copy(u8, dest, self.buf[self.ri..]);
+ // How much data we've copied from `ri` to the end
+ const r = self.buf.len - self.ri;
+ std.mem.copy(u8, dest[r..], self.buf[0 .. N - r]);
+ }
+
+ self.ri = (self.ri + N) & (self.buf.len - 1);
+ self.el -= N;
+
+ return N;
+ }
+
+ // Copy `length` bytes starting from `distance` bytes behind the
+ // write pointer.
+ // Be careful: the length may be greater than the distance; that's how
+ // the compressor encodes run-length-encoded sequences.
+ fn copyFrom(self: *WSelf, distance: usize, length: usize) usize {
+ const N = math.min(length, self.writable());
+
+ if (N == 0) return 0;
+
+ // TODO: Profile and, if needed, replace with smarter juggling
+ // of the window memory for the non-overlapping case.
+ var i: usize = 0;
+ while (i < N) : (i += 1) {
+ const index = (self.wi -% distance) % self.buf.len;
+ self.appendUnsafe(self.buf[index]);
+ }
+
+ return N;
+ }
+ },
+
+ // Compressor-local Huffman tables used to decompress blocks with
+ // dynamic codes.
+ huffman_tables: [2]Huffman = undefined,
+
+ // Huffman tables used for decoding length/distance pairs.
+ hdist: *Huffman,
+ hlen: *Huffman,
+
+ fn stored(self: *Self) !void {
+ // Discard the remaining bits; the length field is always
+ // byte-aligned (and so is the data)
+ self.bit_reader.alignToByte();
+
+ const length = (try self.bit_reader.readBitsNoEof(u16, 16));
+ const length_cpl = (try self.bit_reader.readBitsNoEof(u16, 16));
+
+ if (length != ~length_cpl)
+ return error.InvalidStoredSize;
+
+ self.state = .{ .Copy = length };
+ }
+
+ fn fixed(self: *Self) !void {
+ comptime var lencode: Huffman = undefined;
+ comptime var distcode: Huffman = undefined;
+
+ // The Huffman codes are specified in the RFC1951, section 3.2.6
+ comptime {
+ @setEvalBranchQuota(100000);
+
+ const len_lengths = //
+ [_]u16{8} ** 144 ++
+ [_]u16{9} ** 112 ++
+ [_]u16{7} ** 24 ++
+ [_]u16{8} ** 8;
+ assert(len_lengths.len == FIXLCODES);
+ try lencode.construct(len_lengths[0..]);
+
+ const dist_lengths = [_]u16{5} ** MAXDCODES;
+ try distcode.construct(dist_lengths[0..]);
+ }
+
+ self.hlen = &lencode;
+ self.hdist = &distcode;
+ self.state = .DecodeBlockData;
+ }
+
+ fn dynamic(self: *Self) !void {
+ // Number of length codes
+ const nlen = (try self.bit_reader.readBitsNoEof(usize, 5)) + 257;
+ // Number of distance codes
+ const ndist = (try self.bit_reader.readBitsNoEof(usize, 5)) + 1;
+ // Number of code length codes
+ const ncode = (try self.bit_reader.readBitsNoEof(usize, 4)) + 4;
+
+ if (nlen > MAXLCODES or ndist > MAXDCODES)
+ return error.BadCounts;
+
+ // Permutation of code length codes
+ const ORDER = [19]u16{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4,
+ 12, 3, 13, 2, 14, 1, 15,
+ };
+
+ // Build the Huffman table to decode the code length codes
+ var lencode: Huffman = undefined;
+ {
+ var lengths = std.mem.zeroes([19]u16);
+
+ // Read the code lengths, missing ones are left as zero
+ for (ORDER[0..ncode]) |val| {
+ lengths[val] = try self.bit_reader.readBitsNoEof(u16, 3);
+ }
+
+ try lencode.construct(lengths[0..]);
+ }
+
+ // Read the length/literal and distance code length tables.
+ // Zero the table by default so we can avoid explicitly writing out
+ // zeros for codes 17 and 18
+ var lengths = std.mem.zeroes([MAXCODES]u16);
+
+ var i: usize = 0;
+ while (i < nlen + ndist) {
+ const symbol = try self.decode(&lencode);
+
+ switch (symbol) {
+ 0...15 => {
+ lengths[i] = symbol;
+ i += 1;
+ },
+ 16 => {
+ // repeat last length 3..6 times
+ if (i == 0) return error.NoLastLength;
+
+ const last_length = lengths[i - 1];
+ const repeat = 3 + (try self.bit_reader.readBitsNoEof(usize, 2));
+ const last_index = i + repeat;
+ while (i < last_index) : (i += 1) {
+ lengths[i] = last_length;
+ }
+ },
+ 17 => {
+ // repeat zero 3..10 times
+ i += 3 + (try self.bit_reader.readBitsNoEof(usize, 3));
+ },
+ 18 => {
+ // repeat zero 11..138 times
+ i += 11 + (try self.bit_reader.readBitsNoEof(usize, 7));
+ },
+ else => return error.InvalidSymbol,
+ }
+ }
+
+ if (i > nlen + ndist)
+ return error.InvalidLength;
+
+ // Check if the end of block code is present
+ if (lengths[256] == 0)
+ return error.MissingEOBCode;
+
+ try self.huffman_tables[0].construct(lengths[0..nlen]);
+ try self.huffman_tables[1].construct(lengths[nlen .. nlen + ndist]);
+
+ self.hlen = &self.huffman_tables[0];
+ self.hdist = &self.huffman_tables[1];
+ self.state = .DecodeBlockData;
+ }
+
+ fn codes(self: *Self, lencode: *Huffman, distcode: *Huffman) !bool {
+ // Size base for length codes 257..285
+ const LENS = [29]u16{
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258,
+ };
+ // Extra bits for length codes 257..285
+ const LEXT = [29]u16{
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0,
+ };
+ // Offset base for distance codes 0..29
+ const DISTS = [30]u16{
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577,
+ };
+ // Extra bits for distance codes 0..29
+ const DEXT = [30]u16{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ };
+
+ while (true) {
+ const symbol = try self.decode(lencode);
+
+ switch (symbol) {
+ 0...255 => {
+ // Literal value
+ const c = @truncate(u8, symbol);
+ if (self.window.append(c) == 0) {
+ self.state = .{ .CopyLit = c };
+ return false;
+ }
+ },
+ 256 => {
+ // End of block symbol
+ return true;
+ },
+ 257...285 => {
+ // Length/distance pair
+ const length_symbol = symbol - 257;
+ const length = LENS[length_symbol] +
+ try self.bit_reader.readBitsNoEof(u16, LEXT[length_symbol]);
+
+ const distance_symbol = try self.decode(distcode);
+ const distance = DISTS[distance_symbol] +
+ try self.bit_reader.readBitsNoEof(u16, DEXT[distance_symbol]);
+
+ if (distance > self.window.buf.len)
+ return error.InvalidDistance;
+
+ const written = self.window.copyFrom(distance, length);
+ if (written != length) {
+ self.state = .{
+ .CopyFrom = .{
+ .distance = distance,
+ .length = length - @truncate(u16, written),
+ },
+ };
+ return false;
+ }
+ },
+ else => return error.InvalidFixedCode,
+ }
+ }
+ }
+
+ fn decode(self: *Self, h: *Huffman) !u16 {
+ var len: usize = 1;
+ var code: usize = 0;
+ var first: usize = 0;
+ var index: usize = 0;
+
+ while (len <= MAXBITS) : (len += 1) {
+ code |= try self.bit_reader.readBitsNoEof(usize, 1);
+ const count = h.count[len];
+ if (code < first + count)
+ return h.symbol[index + (code - first)];
+ index += count;
+ first += count;
+ first <<= 1;
+ code <<= 1;
+ }
+
+ return error.OutOfCodes;
+ }
+
+ fn step(self: *Self) !void {
+ while (true) {
+ switch (self.state) {
+ .DecodeBlockHeader => {
+ // The compressed stream is done
+ if (self.seen_eos) return;
+
+ const last = try self.bit_reader.readBitsNoEof(u1, 1);
+ const kind = try self.bit_reader.readBitsNoEof(u2, 2);
+
+ self.seen_eos = last != 0;
+
+ // The next state depends on the block type
+ switch (kind) {
+ 0 => try self.stored(),
+ 1 => try self.fixed(),
+ 2 => try self.dynamic(),
+ 3 => return error.InvalidBlockType,
+ }
+ },
+ .DecodeBlockData => {
+ if (!try self.codes(self.hlen, self.hdist)) {
+ return;
+ }
+
+ self.state = .DecodeBlockHeader;
+ },
+ .Copy => |*length| {
+ const N = math.min(self.window.writable(), length.*);
+
+ // TODO: This loop can be more efficient. On the other
+ // hand uncompressed blocks are not that common so...
+ var i: usize = 0;
+ while (i < N) : (i += 1) {
+ var tmp: [1]u8 = undefined;
+ if ((try self.bit_reader.read(&tmp)) != 1) {
+ // Unexpected end of stream; keep this error
+ // consistent with the use of readBitsNoEof
+ return error.EndOfStream;
+ }
+ self.window.appendUnsafe(tmp[0]);
+ }
+
+ if (N != length.*) {
+ length.* -= N;
+ return;
+ }
+
+ self.state = .DecodeBlockHeader;
+ },
+ .CopyLit => |c| {
+ if (self.window.append(c) == 0) {
+ return;
+ }
+
+ self.state = .DecodeBlockData;
+ },
+ .CopyFrom => |*info| {
+ const written = self.window.copyFrom(info.distance, info.length);
+ if (written != info.length) {
+ info.length -= @truncate(u16, written);
+ return;
+ }
+
+ self.state = .DecodeBlockData;
+ },
+ }
+ }
+ }
+
+ fn init(source: ReaderType, window_slice: []u8) Self {
+ assert(math.isPowerOfTwo(window_slice.len));
+
+ return Self{
+ .bit_reader = io.bitReader(.Little, source),
+ .window = .{ .buf = window_slice },
+ .seen_eos = false,
+ .state = .DecodeBlockHeader,
+ .hdist = undefined,
+ .hlen = undefined,
+ };
+ }
+
+ // Implements the io.Reader interface
+ pub fn read(self: *Self, buffer: []u8) Error!usize {
+ if (buffer.len == 0)
+ return 0;
+
+ // Try reading as much as possible from the window
+ var read_amt: usize = self.window.read(buffer);
+ while (read_amt < buffer.len) {
+ // Run the state machine; we can detect the "effective" end-of-stream
+ // condition by checking whether any progress was made.
+ // Why "effective"? Because even though `seen_eos` is true we
+ // may still have to finish processing other decoding steps.
+ try self.step();
+ // No progress was made
+ if (self.window.readable() == 0)
+ break;
+
+ read_amt += self.window.read(buffer[read_amt..]);
+ }
+
+ return read_amt;
+ }
+
+ pub fn reader(self: *Self) Reader {
+ return .{ .context = self };
+ }
+ };
+}
+
+pub fn inflateStream(reader: anytype, window_slice: []u8) InflateStream(@TypeOf(reader)) {
+ return InflateStream(@TypeOf(reader)).init(reader, window_slice);
+}
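A minimal sketch of decompressing a raw DEFLATE stream held in memory with the new API; the helper name, the in-memory source, and the 32 KiB window are assumptions for illustration (the window only has to be a power-of-two buffer large enough for the stream's back-references):

    const std = @import("std");

    fn inflateAlloc(allocator: *std.mem.Allocator, compressed: []const u8) ![]u8 {
        var in_stream = std.io.fixedBufferStream(compressed);

        // 32 KiB covers the maximum back-reference distance allowed by RFC 1951.
        var window: [0x8000]u8 = undefined;
        var inflate = std.compress.deflate.inflateStream(in_stream.reader(), &window);

        // Drain the decompressed bytes into an allocator-owned buffer.
        return inflate.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    }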
diff --git a/lib/std/compress/rfc1951.txt b/lib/std/compress/rfc1951.txt
new file mode 100644
index 0000000000..403c8c722f
--- /dev/null
+++ b/lib/std/compress/rfc1951.txt
@@ -0,0 +1,955 @@
+
+
+
+
+
+
+Network Working Group P. Deutsch
+Request for Comments: 1951 Aladdin Enterprises
+Category: Informational May 1996
+
+
+ DEFLATE Compressed Data Format Specification version 1.3
+
+Status of This Memo
+
+ This memo provides information for the Internet community. This memo
+ does not specify an Internet standard of any kind. Distribution of
+ this memo is unlimited.
+
+IESG Note:
+
+ The IESG takes no position on the validity of any Intellectual
+ Property Rights statements contained in this document.
+
+Notices
+
+ Copyright (c) 1996 L. Peter Deutsch
+
+ Permission is granted to copy and distribute this document for any
+ purpose and without charge, including translations into other
+ languages and incorporation into compilations, provided that the
+ copyright notice and this notice are preserved, and that any
+ substantive changes or deletions from the original are clearly
+ marked.
+
+ A pointer to the latest version of this and related documentation in
+ HTML format can be found at the URL
+ <ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html>.
+
+Abstract
+
+ This specification defines a lossless compressed data format that
+ compresses data using a combination of the LZ77 algorithm and Huffman
+ coding, with efficiency comparable to the best currently available
+ general-purpose compression methods. The data can be produced or
+ consumed, even for an arbitrarily long sequentially presented input
+ data stream, using only an a priori bounded amount of intermediate
+ storage. The format can be implemented readily in a manner not
+ covered by patents.
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 1]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+Table of Contents
+
+ 1. Introduction ................................................... 2
+ 1.1. Purpose ................................................... 2
+ 1.2. Intended audience ......................................... 3
+ 1.3. Scope ..................................................... 3
+ 1.4. Compliance ................................................ 3
+ 1.5. Definitions of terms and conventions used ................ 3
+ 1.6. Changes from previous versions ............................ 4
+ 2. Compressed representation overview ............................. 4
+ 3. Detailed specification ......................................... 5
+ 3.1. Overall conventions ....................................... 5
+ 3.1.1. Packing into bytes .................................. 5
+ 3.2. Compressed block format ................................... 6
+ 3.2.1. Synopsis of prefix and Huffman coding ............... 6
+ 3.2.2. Use of Huffman coding in the "deflate" format ....... 7
+ 3.2.3. Details of block format ............................. 9
+ 3.2.4. Non-compressed blocks (BTYPE=00) ................... 11
+ 3.2.5. Compressed blocks (length and distance codes) ...... 11
+ 3.2.6. Compression with fixed Huffman codes (BTYPE=01) .... 12
+ 3.2.7. Compression with dynamic Huffman codes (BTYPE=10) .. 13
+ 3.3. Compliance ............................................... 14
+ 4. Compression algorithm details ................................. 14
+ 5. References .................................................... 16
+ 6. Security Considerations ....................................... 16
+ 7. Source code ................................................... 16
+ 8. Acknowledgements .............................................. 16
+ 9. Author's Address .............................................. 17
+
+1. Introduction
+
+ 1.1. Purpose
+
+ The purpose of this specification is to define a lossless
+ compressed data format that:
+ * Is independent of CPU type, operating system, file system,
+ and character set, and hence can be used for interchange;
+ * Can be produced or consumed, even for an arbitrarily long
+ sequentially presented input data stream, using only an a
+ priori bounded amount of intermediate storage, and hence
+ can be used in data communications or similar structures
+ such as Unix filters;
+ * Compresses data with efficiency comparable to the best
+ currently available general-purpose compression methods,
+ and in particular considerably better than the "compress"
+ program;
+ * Can be implemented readily in a manner not covered by
+ patents, and hence can be practiced freely;
+
+
+
+Deutsch Informational [Page 2]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ * Is compatible with the file format produced by the current
+ widely used gzip utility, in that conforming decompressors
+ will be able to read data produced by the existing gzip
+ compressor.
+
+ The data format defined by this specification does not attempt to:
+
+ * Allow random access to compressed data;
+ * Compress specialized data (e.g., raster graphics) as well
+ as the best currently available specialized algorithms.
+
+ A simple counting argument shows that no lossless compression
+ algorithm can compress every possible input data set. For the
+ format defined here, the worst case expansion is 5 bytes per 32K-
+ byte block, i.e., a size increase of 0.015% for large data sets.
+ English text usually compresses by a factor of 2.5 to 3;
+ executable files usually compress somewhat less; graphical data
+ such as raster images may compress much more.
+
+ 1.2. Intended audience
+
+ This specification is intended for use by implementors of software
+ to compress data into "deflate" format and/or decompress data from
+ "deflate" format.
+
+ The text of the specification assumes a basic background in
+ programming at the level of bits and other primitive data
+ representations. Familiarity with the technique of Huffman coding
+ is helpful but not required.
+
+ 1.3. Scope
+
+ The specification specifies a method for representing a sequence
+ of bytes as a (usually shorter) sequence of bits, and a method for
+ packing the latter bit sequence into bytes.
+
+ 1.4. Compliance
+
+ Unless otherwise indicated below, a compliant decompressor must be
+ able to accept and decompress any data set that conforms to all
+ the specifications presented here; a compliant compressor must
+ produce data sets that conform to all the specifications presented
+ here.
+
+ 1.5. Definitions of terms and conventions used
+
+ Byte: 8 bits stored or transmitted as a unit (same as an octet).
+ For this specification, a byte is exactly 8 bits, even on machines
+
+
+
+Deutsch Informational [Page 3]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ which store a character on a number of bits different from eight.
+ See below, for the numbering of bits within a byte.
+
+ String: a sequence of arbitrary bytes.
+
+ 1.6. Changes from previous versions
+
+ There have been no technical changes to the deflate format since
+ version 1.1 of this specification. In version 1.2, some
+ terminology was changed. Version 1.3 is a conversion of the
+ specification to RFC style.
+
+2. Compressed representation overview
+
+ A compressed data set consists of a series of blocks, corresponding
+ to successive blocks of input data. The block sizes are arbitrary,
+ except that non-compressible blocks are limited to 65,535 bytes.
+
+ Each block is compressed using a combination of the LZ77 algorithm
+ and Huffman coding. The Huffman trees for each block are independent
+ of those for previous or subsequent blocks; the LZ77 algorithm may
+ use a reference to a duplicated string occurring in a previous block,
+ up to 32K input bytes before.
+
+ Each block consists of two parts: a pair of Huffman code trees that
+ describe the representation of the compressed data part, and a
+ compressed data part. (The Huffman trees themselves are compressed
+ using Huffman encoding.) The compressed data consists of a series of
+ elements of two types: literal bytes (of strings that have not been
+ detected as duplicated within the previous 32K input bytes), and
+ pointers to duplicated strings, where a pointer is represented as a
+ pair <length, backward distance>. The representation used in the
+ "deflate" format limits distances to 32K bytes and lengths to 258
+ bytes, but does not limit the size of a block, except for
+ uncompressible blocks, which are limited as noted above.
+
+ Each type of value (literals, distances, and lengths) in the
+ compressed data is represented using a Huffman code, using one code
+ tree for literals and lengths and a separate code tree for distances.
+ The code trees for each block appear in a compact form just before
+ the compressed data for that block.
+
+
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 4]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+3. Detailed specification
+
+ 3.1. Overall conventions In the diagrams below, a box like this:
+
+ +---+
+ | | <-- the vertical bars might be missing
+ +---+
+
+ represents one byte; a box like this:
+
+ +==============+
+ | |
+ +==============+
+
+ represents a variable number of bytes.
+
+ Bytes stored within a computer do not have a "bit order", since
+ they are always treated as a unit. However, a byte considered as
+ an integer between 0 and 255 does have a most- and least-
+ significant bit, and since we write numbers with the most-
+ significant digit on the left, we also write bytes with the most-
+ significant bit on the left. In the diagrams below, we number the
+ bits of a byte so that bit 0 is the least-significant bit, i.e.,
+ the bits are numbered:
+
+ +--------+
+ |76543210|
+ +--------+
+
+ Within a computer, a number may occupy multiple bytes. All
+ multi-byte numbers in the format described here are stored with
+ the least-significant byte first (at the lower memory address).
+ For example, the decimal number 520 is stored as:
+
+ 0 1
+ +--------+--------+
+ |00001000|00000010|
+ +--------+--------+
+ ^ ^
+ | |
+ | + more significant byte = 2 x 256
+ + less significant byte = 8
+
+ 3.1.1. Packing into bytes
+
+ This document does not address the issue of the order in which
+ bits of a byte are transmitted on a bit-sequential medium,
+ since the final data format described here is byte- rather than
+
+
+
+Deutsch Informational [Page 5]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ bit-oriented. However, we describe the compressed block format
+ in below, as a sequence of data elements of various bit
+ lengths, not a sequence of bytes. We must therefore specify
+ how to pack these data elements into bytes to form the final
+ compressed byte sequence:
+
+ * Data elements are packed into bytes in order of
+ increasing bit number within the byte, i.e., starting
+ with the least-significant bit of the byte.
+ * Data elements other than Huffman codes are packed
+ starting with the least-significant bit of the data
+ element.
+ * Huffman codes are packed starting with the most-
+ significant bit of the code.
+
+ In other words, if one were to print out the compressed data as
+ a sequence of bytes, starting with the first byte at the
+ *right* margin and proceeding to the *left*, with the most-
+ significant bit of each byte on the left as usual, one would be
+ able to parse the result from right to left, with fixed-width
+ elements in the correct MSB-to-LSB order and Huffman codes in
+ bit-reversed order (i.e., with the first bit of the code in the
+ relative LSB position).
+
+ 3.2. Compressed block format
+
+ 3.2.1. Synopsis of prefix and Huffman coding
+
+ Prefix coding represents symbols from an a priori known
+ alphabet by bit sequences (codes), one code for each symbol, in
+ a manner such that different symbols may be represented by bit
+ sequences of different lengths, but a parser can always parse
+ an encoded string unambiguously symbol-by-symbol.
+
+ We define a prefix code in terms of a binary tree in which the
+ two edges descending from each non-leaf node are labeled 0 and
+ 1 and in which the leaf nodes correspond one-for-one with (are
+ labeled with) the symbols of the alphabet; then the code for a
+ symbol is the sequence of 0's and 1's on the edges leading from
+ the root to the leaf labeled with that symbol. For example:
+
+
+
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 6]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ /\ Symbol Code
+ 0 1 ------ ----
+ / \ A 00
+ /\ B B 1
+ 0 1 C 011
+ / \ D 010
+ A /\
+ 0 1
+ / \
+ D C
+
+ A parser can decode the next symbol from an encoded input
+ stream by walking down the tree from the root, at each step
+ choosing the edge corresponding to the next input bit.
+
+ Given an alphabet with known symbol frequencies, the Huffman
+ algorithm allows the construction of an optimal prefix code
+ (one which represents strings with those symbol frequencies
+ using the fewest bits of any possible prefix codes for that
+ alphabet). Such a code is called a Huffman code. (See
+ reference [1] in Chapter 5, references for additional
+ information on Huffman codes.)
+
+ Note that in the "deflate" format, the Huffman codes for the
+ various alphabets must not exceed certain maximum code lengths.
+ This constraint complicates the algorithm for computing code
+ lengths from symbol frequencies. Again, see Chapter 5,
+ references for details.
+
+ 3.2.2. Use of Huffman coding in the "deflate" format
+
+ The Huffman codes used for each alphabet in the "deflate"
+ format have two additional rules:
+
+ * All codes of a given bit length have lexicographically
+ consecutive values, in the same order as the symbols
+ they represent;
+
+ * Shorter codes lexicographically precede longer codes.
+
+
+
+
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 7]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ We could recode the example above to follow this rule as
+ follows, assuming that the order of the alphabet is ABCD:
+
+ Symbol Code
+ ------ ----
+ A 10
+ B 0
+ C 110
+ D 111
+
+ I.e., 0 precedes 10 which precedes 11x, and 110 and 111 are
+ lexicographically consecutive.
+
+ Given this rule, we can define the Huffman code for an alphabet
+ just by giving the bit lengths of the codes for each symbol of
+ the alphabet in order; this is sufficient to determine the
+ actual codes. In our example, the code is completely defined
+ by the sequence of bit lengths (2, 1, 3, 3). The following
+ algorithm generates the codes as integers, intended to be read
+ from most- to least-significant bit. The code lengths are
+ initially in tree[I].Len; the codes are produced in
+ tree[I].Code.
+
+ 1) Count the number of codes for each code length. Let
+ bl_count[N] be the number of codes of length N, N >= 1.
+
+ 2) Find the numerical value of the smallest code for each
+ code length:
+
+ code = 0;
+ bl_count[0] = 0;
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ code = (code + bl_count[bits-1]) << 1;
+ next_code[bits] = code;
+ }
+
+ 3) Assign numerical values to all codes, using consecutive
+ values for all codes of the same length with the base
+ values determined at step 2. Codes that are never used
+ (which have a bit length of zero) must not be assigned a
+ value.
+
+ for (n = 0; n <= max_code; n++) {
+ len = tree[n].Len;
+ if (len != 0) {
+ tree[n].Code = next_code[len];
+ next_code[len]++;
+ }
+
+
+
+Deutsch Informational [Page 8]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ }
+
+ Example:
+
+ Consider the alphabet ABCDEFGH, with bit lengths (3, 3, 3, 3,
+ 3, 2, 4, 4). After step 1, we have:
+
+ N bl_count[N]
+ - -----------
+ 2 1
+ 3 5
+ 4 2
+
+ Step 2 computes the following next_code values:
+
+ N next_code[N]
+ - ------------
+ 1 0
+ 2 0
+ 3 2
+ 4 14
+
+ Step 3 produces the following code values:
+
+ Symbol Length Code
+ ------ ------ ----
+ A 3 010
+ B 3 011
+ C 3 100
+ D 3 101
+ E 3 110
+ F 2 00
+ G 4 1110
+ H 4 1111
+
+ 3.2.3. Details of block format
+
+ Each block of compressed data begins with 3 header bits
+ containing the following data:
+
+ first bit BFINAL
+ next 2 bits BTYPE
+
+ Note that the header bits do not necessarily begin on a byte
+ boundary, since a block does not necessarily occupy an integral
+ number of bytes.
+
+
+
+
+
+Deutsch Informational [Page 9]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ BFINAL is set if and only if this is the last block of the data
+ set.
+
+ BTYPE specifies how the data are compressed, as follows:
+
+ 00 - no compression
+ 01 - compressed with fixed Huffman codes
+ 10 - compressed with dynamic Huffman codes
+ 11 - reserved (error)
+
+ The only difference between the two compressed cases is how the
+ Huffman codes for the literal/length and distance alphabets are
+ defined.
+
+ In all cases, the decoding algorithm for the actual data is as
+ follows:
+
+ do
+ read block header from input stream.
+ if stored with no compression
+ skip any remaining bits in current partially
+ processed byte
+ read LEN and NLEN (see next section)
+ copy LEN bytes of data to output
+ otherwise
+ if compressed with dynamic Huffman codes
+ read representation of code trees (see
+ subsection below)
+ loop (until end of block code recognized)
+ decode literal/length value from input stream
+ if value < 256
+ copy value (literal byte) to output stream
+ otherwise
+ if value = end of block (256)
+ break from loop
+ otherwise (value = 257..285)
+ decode distance from input stream
+
+ move backwards distance bytes in the output
+ stream, and copy length bytes from this
+ position to the output stream.
+ end loop
+ while not last block
+
+ Note that a duplicated string reference may refer to a string
+ in a previous block; i.e., the backward distance may cross one
+ or more block boundaries. However a distance cannot refer past
+ the beginning of the output stream. (An application using a
+
+
+
+Deutsch Informational [Page 10]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ preset dictionary might discard part of the output stream; a
+ distance can refer to that part of the output stream anyway)
+ Note also that the referenced string may overlap the current
+ position; for example, if the last 2 bytes decoded have values
+ X and Y, a string reference with <length = 5, distance = 2>
+ adds X,Y,X,Y,X to the output stream.
+
+ We now specify each compression method in turn.
+
+ 3.2.4. Non-compressed blocks (BTYPE=00)
+
+ Any bits of input up to the next byte boundary are ignored.
+ The rest of the block consists of the following information:
+
+ 0 1 2 3 4...
+ +---+---+---+---+================================+
+ | LEN | NLEN |... LEN bytes of literal data...|
+ +---+---+---+---+================================+
+
+ LEN is the number of data bytes in the block. NLEN is the
+ one's complement of LEN.
+
+ 3.2.5. Compressed blocks (length and distance codes)
+
+ As noted above, encoded data blocks in the "deflate" format
+ consist of sequences of symbols drawn from three conceptually
+ distinct alphabets: either literal bytes, from the alphabet of
+ byte values (0..255), or <length, backward distance> pairs,
+ where the length is drawn from (3..258) and the distance is
+ drawn from (1..32,768). In fact, the literal and length
+ alphabets are merged into a single alphabet (0..285), where
+ values 0..255 represent literal bytes, the value 256 indicates
+ end-of-block, and values 257..285 represent length codes
+ (possibly in conjunction with extra bits following the symbol
+ code) as follows:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 11]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ Extra Extra Extra
+ Code Bits Length(s) Code Bits Lengths Code Bits Length(s)
+ ---- ---- ------ ---- ---- ------- ---- ---- -------
+ 257 0 3 267 1 15,16 277 4 67-82
+ 258 0 4 268 1 17,18 278 4 83-98
+ 259 0 5 269 2 19-22 279 4 99-114
+ 260 0 6 270 2 23-26 280 4 115-130
+ 261 0 7 271 2 27-30 281 5 131-162
+ 262 0 8 272 2 31-34 282 5 163-194
+ 263 0 9 273 3 35-42 283 5 195-226
+ 264 0 10 274 3 43-50 284 5 227-257
+ 265 1 11,12 275 3 51-58 285 0 258
+ 266 1 13,14 276 3 59-66
+
+ The extra bits should be interpreted as a machine integer
+ stored with the most-significant bit first, e.g., bits 1110
+ represent the value 14.
+
+ Extra Extra Extra
+ Code Bits Dist Code Bits Dist Code Bits Distance
+ ---- ---- ---- ---- ---- ------ ---- ---- --------
+ 0 0 1 10 4 33-48 20 9 1025-1536
+ 1 0 2 11 4 49-64 21 9 1537-2048
+ 2 0 3 12 5 65-96 22 10 2049-3072
+ 3 0 4 13 5 97-128 23 10 3073-4096
+ 4 1 5,6 14 6 129-192 24 11 4097-6144
+ 5 1 7,8 15 6 193-256 25 11 6145-8192
+ 6 2 9-12 16 7 257-384 26 12 8193-12288
+ 7 2 13-16 17 7 385-512 27 12 12289-16384
+ 8 3 17-24 18 8 513-768 28 13 16385-24576
+ 9 3 25-32 19 8 769-1024 29 13 24577-32768
+
+ 3.2.6. Compression with fixed Huffman codes (BTYPE=01)
+
+ The Huffman codes for the two alphabets are fixed, and are not
+ represented explicitly in the data. The Huffman code lengths
+ for the literal/length alphabet are:
+
+ Lit Value Bits Codes
+ --------- ---- -----
+ 0 - 143 8 00110000 through
+ 10111111
+ 144 - 255 9 110010000 through
+ 111111111
+ 256 - 279 7 0000000 through
+ 0010111
+ 280 - 287 8 11000000 through
+ 11000111
+
+
+
+Deutsch Informational [Page 12]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ The code lengths are sufficient to generate the actual codes,
+ as described above; we show the codes in the table for added
+ clarity. Literal/length values 286-287 will never actually
+ occur in the compressed data, but participate in the code
+ construction.
+
+ Distance codes 0-31 are represented by (fixed-length) 5-bit
+ codes, with possible additional bits as shown in the table
+ shown in Paragraph 3.2.5, above. Note that distance codes 30-
+ 31 will never actually occur in the compressed data.
+
+ 3.2.7. Compression with dynamic Huffman codes (BTYPE=10)
+
+ The Huffman codes for the two alphabets appear in the block
+ immediately after the header bits and before the actual
+ compressed data, first the literal/length code and then the
+ distance code. Each code is defined by a sequence of code
+ lengths, as discussed in Paragraph 3.2.2, above. For even
+ greater compactness, the code length sequences themselves are
+ compressed using a Huffman code. The alphabet for code lengths
+ is as follows:
+
+ 0 - 15: Represent code lengths of 0 - 15
+ 16: Copy the previous code length 3 - 6 times.
+ The next 2 bits indicate repeat length
+ (0 = 3, ... , 3 = 6)
+ Example: Codes 8, 16 (+2 bits 11),
+ 16 (+2 bits 10) will expand to
+ 12 code lengths of 8 (1 + 6 + 5)
+ 17: Repeat a code length of 0 for 3 - 10 times.
+ (3 bits of length)
+ 18: Repeat a code length of 0 for 11 - 138 times
+ (7 bits of length)
+
+ A code length of 0 indicates that the corresponding symbol in
+ the literal/length or distance alphabet will not occur in the
+ block, and should not participate in the Huffman code
+ construction algorithm given earlier. If only one distance
+ code is used, it is encoded using one bit, not zero bits; in
+ this case there is a single code length of one, with one unused
+ code. One distance code of zero bits means that there are no
+ distance codes used at all (the data is all literals).
+
+ We can now define the format of the block:
+
+ 5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286)
+ 5 Bits: HDIST, # of Distance codes - 1 (1 - 32)
+ 4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19)
+
+
+
+Deutsch Informational [Page 13]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ (HCLEN + 4) x 3 bits: code lengths for the code length
+ alphabet given just above, in the order: 16, 17, 18,
+ 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+
+ These code lengths are interpreted as 3-bit integers
+ (0-7); as above, a code length of 0 means the
+ corresponding symbol (literal/length or distance code
+ length) is not used.
+
+ HLIT + 257 code lengths for the literal/length alphabet,
+ encoded using the code length Huffman code
+
+ HDIST + 1 code lengths for the distance alphabet,
+ encoded using the code length Huffman code
+
+ The actual compressed data of the block,
+ encoded using the literal/length and distance Huffman
+ codes
+
+ The literal/length symbol 256 (end of data),
+ encoded using the literal/length Huffman code
+
+ The code length repeat codes can cross from HLIT + 257 to the
+ HDIST + 1 code lengths. In other words, all code lengths form
+ a single sequence of HLIT + HDIST + 258 values.
+
+ 3.3. Compliance
+
+ A compressor may limit further the ranges of values specified in
+ the previous section and still be compliant; for example, it may
+ limit the range of backward pointers to some value smaller than
+ 32K. Similarly, a compressor may limit the size of blocks so that
+ a compressible block fits in memory.
+
+ A compliant decompressor must accept the full range of possible
+ values defined in the previous section, and must accept blocks of
+ arbitrary size.
+
+4. Compression algorithm details
+
+ While it is the intent of this document to define the "deflate"
+ compressed data format without reference to any particular
+ compression algorithm, the format is related to the compressed
+ formats produced by LZ77 (Lempel-Ziv 1977, see reference [2] below);
+ since many variations of LZ77 are patented, it is strongly
+ recommended that the implementor of a compressor follow the general
+ algorithm presented here, which is known not to be patented per se.
+ The material in this section is not part of the definition of the
+
+
+
+Deutsch Informational [Page 14]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+ specification per se, and a compressor need not follow it in order to
+ be compliant.
+
+ The compressor terminates a block when it determines that starting a
+ new block with fresh trees would be useful, or when the block size
+ fills up the compressor's block buffer.
+
+ The compressor uses a chained hash table to find duplicated strings,
+ using a hash function that operates on 3-byte sequences. At any
+ given point during compression, let XYZ be the next 3 input bytes to
+ be examined (not necessarily all different, of course). First, the
+ compressor examines the hash chain for XYZ. If the chain is empty,
+ the compressor simply writes out X as a literal byte and advances one
+ byte in the input. If the hash chain is not empty, indicating that
+ the sequence XYZ (or, if we are unlucky, some other 3 bytes with the
+ same hash function value) has occurred recently, the compressor
+ compares all strings on the XYZ hash chain with the actual input data
+ sequence starting at the current point, and selects the longest
+ match.
+
+ The compressor searches the hash chains starting with the most recent
+ strings, to favor small distances and thus take advantage of the
+ Huffman encoding. The hash chains are singly linked. There are no
+ deletions from the hash chains; the algorithm simply discards matches
+ that are too old. To avoid a worst-case situation, very long hash
+ chains are arbitrarily truncated at a certain length, determined by a
+ run-time parameter.
+
+ To improve overall compression, the compressor optionally defers the
+ selection of matches ("lazy matching"): after a match of length N has
+ been found, the compressor searches for a longer match starting at
+ the next input byte. If it finds a longer match, it truncates the
+ previous match to a length of one (thus producing a single literal
+ byte) and then emits the longer match. Otherwise, it emits the
+ original match, and, as described above, advances N bytes before
+ continuing.
+
+ Run-time parameters also control this "lazy match" procedure. If
+ compression ratio is most important, the compressor attempts a
+ complete second search regardless of the length of the first match.
+ In the normal case, if the current match is "long enough", the
+ compressor reduces the search for a longer match, thus speeding up
+ the process. If speed is most important, the compressor inserts new
+ strings in the hash table only when no match was found, or when the
+ match is not "too long". This degrades the compression ratio but
+ saves time since there are both fewer insertions and fewer searches.
+
+
+
+
+
+Deutsch Informational [Page 15]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+5. References
+
+ [1] Huffman, D. A., "A Method for the Construction of Minimum
+ Redundancy Codes", Proceedings of the Institute of Radio
+ Engineers, September 1952, Volume 40, Number 9, pp. 1098-1101.
+
+ [2] Ziv J., Lempel A., "A Universal Algorithm for Sequential Data
+ Compression", IEEE Transactions on Information Theory, Vol. 23,
+ No. 3, pp. 337-343.
+
+ [3] Gailly, J.-L., and Adler, M., ZLIB documentation and sources,
+ available in ftp://ftp.uu.net/pub/archiving/zip/doc/
+
+ [4] Gailly, J.-L., and Adler, M., GZIP documentation and sources,
+ available as gzip-*.tar in ftp://prep.ai.mit.edu/pub/gnu/
+
+ [5] Schwartz, E. S., and Kallick, B. "Generating a canonical prefix
+ encoding." Comm. ACM, 7,3 (Mar. 1964), pp. 166-169.
+
+ [6] Hirschberg and Lelewer, "Efficient decoding of prefix codes,"
+ Comm. ACM, 33,4, April 1990, pp. 449-459.
+
+6. Security Considerations
+
+ Any data compression method involves the reduction of redundancy in
+ the data. Consequently, any corruption of the data is likely to have
+ severe effects and be difficult to correct. Uncompressed text, on
+ the other hand, will probably still be readable despite the presence
+ of some corrupted bytes.
+
+ It is recommended that systems using this data format provide some
+ means of validating the integrity of the compressed data. See
+ reference [3], for example.
+
+7. Source code
+
+ Source code for a C language implementation of a "deflate" compliant
+ compressor and decompressor is available within the zlib package at
+ ftp://ftp.uu.net/pub/archiving/zip/zlib/.
+
+8. Acknowledgements
+
+ Trademarks cited in this document are the property of their
+ respective owners.
+
+ Phil Katz designed the deflate format. Jean-Loup Gailly and Mark
+ Adler wrote the related software described in this specification.
+ Glenn Randers-Pehrson converted this document to RFC and HTML format.
+
+
+
+Deutsch Informational [Page 16]
+
+RFC 1951 DEFLATE Compressed Data Format Specification May 1996
+
+
+9. Author's Address
+
+ L. Peter Deutsch
+ Aladdin Enterprises
+ 203 Santa Margarita Ave.
+ Menlo Park, CA 94025
+
+ Phone: (415) 322-0103 (AM only)
+ FAX: (415) 322-1734
+ EMail: <ghost@aladdin.com>
+
+ Questions about the technical content of this specification can be
+ sent by email to:
+
+ Jean-Loup Gailly <gzip@prep.ai.mit.edu> and
+ Mark Adler <madler@alumni.caltech.edu>
+
+ Editorial comments on this specification can be sent by email to:
+
+ L. Peter Deutsch <ghost@aladdin.com> and
+ Glenn Randers-Pehrson <randeg@alumni.rpi.edu>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Deutsch Informational [Page 17]
+
diff --git a/lib/std/compress/rfc1951.txt.fixed.z.9 b/lib/std/compress/rfc1951.txt.fixed.z.9
new file mode 100644
index 0000000000..8ea5904770
--- /dev/null
+++ b/lib/std/compress/rfc1951.txt.fixed.z.9
Binary files differ
diff --git a/lib/std/compress/rfc1951.txt.z.0 b/lib/std/compress/rfc1951.txt.z.0
new file mode 100644
index 0000000000..3f50fb68f8
--- /dev/null
+++ b/lib/std/compress/rfc1951.txt.z.0
Binary files differ
diff --git a/lib/std/compress/rfc1951.txt.z.9 b/lib/std/compress/rfc1951.txt.z.9
new file mode 100644
index 0000000000..84e7cbe5b7
--- /dev/null
+++ b/lib/std/compress/rfc1951.txt.z.9
Binary files differ
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
new file mode 100644
index 0000000000..c4228ded26
--- /dev/null
+++ b/lib/std/compress/zlib.zig
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+//
+// Decompressor for ZLIB data streams (RFC1950)
+
+const std = @import("std");
+const io = std.io;
+const fs = std.fs;
+const testing = std.testing;
+const mem = std.mem;
+const deflate = std.compress.deflate;
+
+pub fn ZlibStream(comptime ReaderType: type) type {
+ return struct {
+ const Self = @This();
+
+ pub const Error = ReaderType.Error ||
+ deflate.InflateStream(ReaderType).Error ||
+ error{ WrongChecksum, Unsupported };
+ pub const Reader = io.Reader(*Self, Error, read);
+
+ allocator: *mem.Allocator,
+ inflater: deflate.InflateStream(ReaderType),
+ in_reader: ReaderType,
+ hasher: std.hash.Adler32,
+ window_slice: []u8,
+
+ fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+ // Zlib header format is specified in RFC1950
+ const header = try source.readBytesNoEof(2);
+
+ const CM = @truncate(u4, header[0]);
+ const CINFO = @truncate(u4, header[0] >> 4);
+ const FCHECK = @truncate(u5, header[1]);
+ const FDICT = @truncate(u1, header[1] >> 5);
+
+ if ((@as(u16, header[0]) << 8 | header[1]) % 31 != 0)
+ return error.BadHeader;
+
+ // The CM field must be 8 to indicate the use of DEFLATE
+ if (CM != 8) return error.InvalidCompression;
+ // CINFO is the base-2 logarithm of the window size, minus 8.
+ // Values above 7 are unspecified and therefore rejected.
+ if (CINFO > 7) return error.InvalidWindowSize;
+ const window_size: u16 = @as(u16, 1) << (CINFO + 8);
+
+ // TODO: Support this case
+ if (FDICT != 0)
+ return error.Unsupported;
+
+ var window_slice = try allocator.alloc(u8, window_size);
+
+ return Self{
+ .allocator = allocator,
+ .inflater = deflate.inflateStream(source, window_slice),
+ .in_reader = source,
+ .hasher = std.hash.Adler32.init(),
+ .window_slice = window_slice,
+ };
+ }
+
+ fn deinit(self: *Self) void {
+ self.allocator.free(self.window_slice);
+ }
+
+ // Implements the io.Reader interface
+ pub fn read(self: *Self, buffer: []u8) Error!usize {
+ if (buffer.len == 0)
+ return 0;
+
+ // Read from the compressed stream and update the computed checksum
+ const r = try self.inflater.read(buffer);
+ if (r != 0) {
+ self.hasher.update(buffer[0..r]);
+ return r;
+ }
+
+            // We've reached the end of the stream; check whether the checksum matches
+ const hash = try self.in_reader.readIntBig(u32);
+ if (hash != self.hasher.final())
+ return error.WrongChecksum;
+
+ return 0;
+ }
+
+ pub fn reader(self: *Self) Reader {
+ return .{ .context = self };
+ }
+ };
+}
+
+pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+ return ZlibStream(@TypeOf(reader)).init(allocator, reader);
+}
+
+fn testReader(data: []const u8, comptime expected: []const u8) !void {
+ var in_stream = io.fixedBufferStream(data);
+
+ var zlib_stream = try zlibStream(testing.allocator, in_stream.reader());
+ defer zlib_stream.deinit();
+
+ // Read and decompress the whole file
+ const buf = try zlib_stream.reader().readAllAlloc(testing.allocator, std.math.maxInt(usize));
+ defer testing.allocator.free(buf);
+ // Calculate its SHA256 hash and check it against the reference
+ var hash: [32]u8 = undefined;
+ std.crypto.hash.sha2.Sha256.hash(buf, hash[0..], .{});
+
+ assertEqual(expected, &hash);
+}
+
+// Assert `expected` == `input` where `input` is a bytestring.
+pub fn assertEqual(comptime expected: []const u8, input: []const u8) void {
+ var expected_bytes: [expected.len / 2]u8 = undefined;
+ for (expected_bytes) |*r, i| {
+ r.* = std.fmt.parseInt(u8, expected[2 * i .. 2 * i + 2], 16) catch unreachable;
+ }
+
+ testing.expectEqualSlices(u8, &expected_bytes, input);
+}
+
+// All the test cases are obtained by compressing the RFC1951 text
+//
+// https://tools.ietf.org/rfc/rfc1951.txt length=36944 bytes
+// SHA256=5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009
+test "compressed data" {
+ // Compressed with compression level = 0
+ try testReader(
+ @embedFile("rfc1951.txt.z.0"),
+ "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+ );
+ // Compressed with compression level = 9
+ try testReader(
+ @embedFile("rfc1951.txt.z.9"),
+ "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+ );
+ // Compressed with compression level = 9 and fixed Huffman codes
+ try testReader(
+ @embedFile("rfc1951.txt.fixed.z.9"),
+ "5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
+ );
+}
+
+test "sanity checks" {
+ // Truncated header
+ testing.expectError(
+ error.EndOfStream,
+ testReader(&[_]u8{0x78}, ""),
+ );
+ // Failed FCHECK check
+ testing.expectError(
+ error.BadHeader,
+ testReader(&[_]u8{ 0x78, 0x9D }, ""),
+ );
+ // Wrong CM
+ testing.expectError(
+ error.InvalidCompression,
+ testReader(&[_]u8{ 0x79, 0x94 }, ""),
+ );
+ // Wrong CINFO
+ testing.expectError(
+ error.InvalidWindowSize,
+ testReader(&[_]u8{ 0x88, 0x98 }, ""),
+ );
+ // Wrong checksum
+ testing.expectError(
+ error.WrongChecksum,
+ testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 }, ""),
+ );
+ // Truncated checksum
+ testing.expectError(
+ error.EndOfStream,
+ testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00 }, ""),
+ );
+}
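// A worked example of the header checks in ZlibStream.init above, written as
// a small sketch around the common 0x78 0x9C zlib header (CM = 8, CINFO = 7,
// FDICT = 0): the 16-bit value 0x789C is 30876, which is divisible by 31, so
// the FCHECK test passes, and CINFO = 7 selects a 1 << (7 + 8) = 32768 byte
// window.
test "zlib header arithmetic sketch" {
    const header = [_]u8{ 0x78, 0x9C };

    testing.expect((@as(u16, header[0]) << 8 | header[1]) % 31 == 0);
    testing.expectEqual(@as(u4, 8), @truncate(u4, header[0]));

    const cinfo = @truncate(u4, header[0] >> 4);
    testing.expectEqual(@as(u16, 32768), @as(u16, 1) << (cinfo + 8));
}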
diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig
index eca777c1cf..2b96d39131 100644
--- a/lib/std/debug/leb128.zig
+++ b/lib/std/debug/leb128.zig
@@ -9,10 +9,10 @@ const testing = std.testing;
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readULEB128(comptime T: type, reader: anytype) !T {
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);
- const max_group = (U.bit_count + 6) / 7;
+ const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftT, 0);
@@ -40,7 +40,7 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, uint_value);
while (true) {
@@ -68,7 +68,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
/// returning the number of bytes written.
pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
const T = @TypeOf(uint_value);
- const max_group = (T.bit_count + 6) / 7;
+ const max_group = (@typeInfo(T).Int.bits + 6) / 7;
var buf = std.io.fixedBufferStream(ptr);
try writeULEB128(buf.writer(), uint_value);
return buf.pos;
@@ -77,11 +77,11 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readILEB128(comptime T: type, reader: anytype) !T {
- const S = if (T.bit_count < 8) i8 else T;
- const U = std.meta.Int(false, S.bit_count);
+ const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+ const U = std.meta.Int(false, @typeInfo(S).Int.bits);
const ShiftU = std.math.Log2Int(U);
- const max_group = (U.bit_count + 6) / 7;
+ const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftU, 0);
@@ -97,7 +97,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
if (@bitCast(S, temp) >= 0) return error.Overflow;
// and all the overflowed bits are 1
- const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
+ const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
@@ -127,8 +127,8 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
- const S = if (T.bit_count < 8) i8 else T;
- const U = std.meta.Int(false, S.bit_count);
+ const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+ const U = std.meta.Int(false, @typeInfo(S).Int.bits);
var value = @intCast(S, int_value);
@@ -173,7 +173,7 @@ pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(false, l * 7)) void {
const T = @TypeOf(int);
- const U = if (T.bit_count < 8) u8 else T;
+ const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, int);
comptime var i = 0;
@@ -346,28 +346,29 @@ test "deserialize unsigned LEB128" {
fn test_write_leb128(value: anytype) !void {
const T = @TypeOf(value);
+ const t_signed = @typeInfo(T).Int.is_signed;
- const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
- const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
- const readStream = if (T.is_signed) readILEB128 else readULEB128;
- const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;
+ const writeStream = if (t_signed) writeILEB128 else writeULEB128;
+ const writeMem = if (t_signed) writeILEB128Mem else writeULEB128Mem;
+ const readStream = if (t_signed) readILEB128 else readULEB128;
+ const readMem = if (t_signed) readILEB128Mem else readULEB128Mem;
// decode to a larger bit size too, to ensure sign extension
// is working as expected
- const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
- const B = std.meta.Int(T.is_signed, larger_type_bits);
+ const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8;
+ const B = std.meta.Int(t_signed, larger_type_bits);
const bytes_needed = bn: {
- const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
- if (T.bit_count <= 7) break :bn @as(u16, 1);
+ const S = std.meta.Int(t_signed, @sizeOf(T) * 8);
+ if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);
const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
- const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
+ const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
};
- const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;
+ const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7;
var buf: [max_groups]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
@@ -414,7 +415,7 @@ test "serialize unsigned LEB128" {
const T = std.meta.Int(false, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(false, T.bit_count + 1), min);
+ var i = @as(std.meta.Int(false, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
@@ -432,7 +433,7 @@ test "serialize signed LEB128" {
const T = std.meta.Int(true, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
- var i = @as(std.meta.Int(true, T.bit_count + 1), min);
+ var i = @as(std.meta.Int(true, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
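// A small worked example of the ULEB128 encoding exercised by the readers and
// writers above, kept as a sketch: the value 624485 is emitted low 7 bits
// first, with the high bit of each byte marking a continuation, giving the
// three bytes 0xE5 0x8E 0x26.
test "ULEB128 worked example sketch" {
    var buf: [3]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    try writeULEB128(fbs.writer(), @as(u32, 624485));
    testing.expectEqualSlices(u8, &[_]u8{ 0xE5, 0x8E, 0x26 }, fbs.getWritten());

    fbs.reset();
    testing.expectEqual(@as(u32, 624485), try readULEB128(u32, fbs.reader()));
}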
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 16d0eaa07a..a652bd8c21 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -66,6 +66,7 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// - output numeric value in hexadecimal notation
/// - `s`: print a pointer-to-many as a c-string, use zero-termination
/// - `B` and `Bi`: output a memory size in either metric (1000) or power-of-two (1024) based notation. works for both float and integer values.
+/// - `e` and `E`: if printing a string, escape non-printable characters
/// - `e`: output floating point value in scientific notation
/// - `d`: output numeric value in decimal notation
/// - `b`: output integer value in binary notation
@@ -81,6 +82,8 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// This allows user types to be formatted in a logical manner instead of dumping all fields of the type.
///
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
+///
+/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`.
pub fn format(
writer: anytype,
comptime fmt: []const u8,
@@ -90,7 +93,7 @@ pub fn format(
if (@typeInfo(@TypeOf(args)) != .Struct) {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
- if (args.len > ArgSetType.bit_count) {
+ if (args.len > @typeInfo(ArgSetType).Int.bits) {
@compileError("32 arguments max are supported per format call");
}
@@ -324,7 +327,7 @@ pub fn formatType(
max_depth: usize,
) @TypeOf(writer).Error!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
- try writer.writeAll(@typeName(@TypeOf(value).Child));
+ try writer.writeAll(@typeName(@typeInfo(@TypeOf(value)).Pointer.child));
try writer.writeAll("@");
try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer);
return;
@@ -429,12 +432,12 @@ pub fn formatType(
if (info.child == u8) {
return formatText(value, fmt, options, writer);
}
- return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Enum, .Union, .Struct => {
return formatType(value.*, fmt, options, writer, max_depth);
},
- else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
+ else => return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }),
},
.Many, .C => {
if (ptr_info.sentinel) |sentinel| {
@@ -445,7 +448,7 @@ pub fn formatType(
return formatText(mem.span(value), fmt, options, writer);
}
}
- return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Slice => {
if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) {
@@ -535,7 +538,7 @@ pub fn formatIntValue(
radix = 10;
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
- if (@TypeOf(int_value).bit_count <= 8) {
+ if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) {
return formatAsciiChar(@as(u8, int_value), options, writer);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
@@ -599,6 +602,16 @@ pub fn formatText(
try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
return;
+ } else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) {
+ for (bytes) |c| {
+ if (std.ascii.isPrint(c)) {
+ try writer.writeByte(c);
+ } else {
+ try writer.writeAll("\\x");
+ try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer);
+ }
+ }
+ return;
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
@@ -934,7 +947,7 @@ pub fn formatInt(
} else
value;
- if (@TypeOf(int_value).is_signed) {
+ if (@typeInfo(@TypeOf(int_value)).Int.is_signed) {
return formatIntSigned(int_value, base, uppercase, options, writer);
} else {
return formatIntUnsigned(int_value, base, uppercase, options, writer);
@@ -976,9 +989,10 @@ fn formatIntUnsigned(
writer: anytype,
) !void {
assert(base >= 2);
- var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
- const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
- const MinInt = std.meta.Int(@TypeOf(value).is_signed, min_int_bits);
+ const value_info = @typeInfo(@TypeOf(value)).Int;
+ var buf: [math.max(value_info.bits, 1)]u8 = undefined;
+ const min_int_bits = comptime math.max(value_info.bits, @typeInfo(@TypeOf(base)).Int.bits);
+ const MinInt = std.meta.Int(value_info.is_signed, min_int_bits);
var a: MinInt = value;
var index: usize = buf.len;
@@ -1319,6 +1333,12 @@ test "slice" {
try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", .{"Test"});
}
+test "escape non-printable" {
+ try testFmt("abc", "{e}", .{"abc"});
+ try testFmt("ab\\xffc", "{e}", .{"ab\xffc"});
+ try testFmt("ab\\xFFc", "{E}", .{"ab\xffc"});
+}
+
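// A short sketch of the brace-escaping rule documented on `format` above:
// doubled braces in the format string are emitted literally.
test "literal braces sketch" {
    try testFmt("{hello}", "{{{}}}", .{"hello"});
}
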
test "pointer" {
{
const value = @intToPtr(*align(1) i32, 0xdeadbeef);
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index 69557714f6..de17c60db6 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -37,7 +37,9 @@
const std = @import("../std.zig");
const ascii = std.ascii;
-const max_digits = 25;
+// The mantissa field in FloatRepr is 64 bits wide and can hold at most 19
+// decimal digits without overflowing (10^19 < 2^64 < 10^20)
+const max_digits = 19;
const f64_plus_zero: u64 = 0x0000000000000000;
const f64_minus_zero: u64 = 0x8000000000000000;
@@ -372,7 +374,7 @@ test "fmt.parseFloat" {
const epsilon = 1e-7;
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
testing.expectError(error.InvalidCharacter, parseFloat(T, " 1"));
@@ -409,6 +411,7 @@ test "fmt.parseFloat" {
expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon));
+ expect(approxEq(T, try parseFloat(T, "2.71828182845904523536"), @as(T, 2.718281828459045), epsilon));
}
}
}
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 21a00eeb1d..a217fb3e9b 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1437,26 +1437,32 @@ pub const Dir = struct {
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
- return self.readFileAllocOptions(allocator, file_path, max_bytes, @alignOf(u8), null);
+ return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+    /// If `size_hint` is specified, the initial buffer size is calculated using
+    /// that value; otherwise the effective file size is used instead.
/// Allows specifying alignment and a sentinel value.
pub fn readFileAllocOptions(
self: Dir,
allocator: *mem.Allocator,
file_path: []const u8,
max_bytes: usize,
+ size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
var file = try self.openFile(file_path, .{});
defer file.close();
- const stat_size = try file.getEndPos();
+        // If the file size doesn't fit in a usize it is certainly greater
+        // than `max_bytes`
+ const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch
+ return error.FileTooBig;
- return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel);
+ return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
}
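// A minimal usage sketch of the readFileAllocOptions signature above; the
// file name and the 1 MiB limit are made up for illustration. Passing `null`
// as `size_hint` lets the stat-reported file size drive the initial
// allocation, and the trailing 0 requests a null-terminated buffer.
fn readFileZSketch(allocator: *std.mem.Allocator) ![:0]u8 {
    return std.fs.cwd().readFileAllocOptions(allocator, "example.txt", 1 << 20, null, @alignOf(u8), 0);
}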
pub const DeleteTreeError = error{
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 6fb2385a85..ef1b501ec3 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -363,31 +363,49 @@ pub const File = struct {
try os.futimens(self.handle, &times);
}
+ /// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 {
- return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null);
+ pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
+ return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
}
+ /// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+    /// If `size_hint` is specified, the initial buffer size is calculated using
+    /// that value; otherwise an arbitrary default is used instead.
/// Allows specifying alignment and a sentinel value.
- pub fn readAllAllocOptions(
+ pub fn readToEndAllocOptions(
self: File,
allocator: *mem.Allocator,
- stat_size: u64,
max_bytes: usize,
+ size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
- const size = math.cast(usize, stat_size) catch math.maxInt(usize);
- if (size > max_bytes) return error.FileTooBig;
-
- const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
- errdefer allocator.free(buf);
+        // If no size hint is provided, fall back to the size == 0 code path
+ const size = size_hint orelse 0;
+
+        // The file size returned by stat is used as a hint to set the buffer
+        // size. If the reported size is zero, as happens on Linux for files
+        // in /proc, a small buffer is allocated instead.
+ const initial_cap = (if (size > 0) size else 1024) + @boolToInt(optional_sentinel != null);
+ var array_list = try std.ArrayListAligned(u8, alignment).initCapacity(allocator, initial_cap);
+ defer array_list.deinit();
+
+ self.reader().readAllArrayList(&array_list, max_bytes) catch |err| switch (err) {
+ error.StreamTooLong => return error.FileTooBig,
+ else => |e| return e,
+ };
- try self.reader().readNoEof(buf);
- return buf;
+ if (optional_sentinel) |sentinel| {
+ try array_list.append(sentinel);
+ const buf = array_list.toOwnedSlice();
+ return buf[0 .. buf.len - 1 :sentinel];
+ } else {
+ return array_list.toOwnedSlice();
+ }
}
pub const ReadError = os.ReadError;
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 409a53b1a7..a59bc46245 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -188,30 +188,30 @@ test "readAllAlloc" {
var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
defer file.close();
- const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024);
+ const buf1 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf1);
testing.expect(buf1.len == 0);
const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
try file.writeAll(write_buf);
try file.seekTo(0);
- const file_size = try file.getEndPos();
// max_bytes > file_size
- const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024);
+ const buf2 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf2);
testing.expectEqual(write_buf.len, buf2.len);
testing.expect(std.mem.eql(u8, write_buf, buf2));
try file.seekTo(0);
// max_bytes == file_size
- const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len);
+ const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len);
defer testing.allocator.free(buf3);
testing.expectEqual(write_buf.len, buf3.len);
testing.expect(std.mem.eql(u8, write_buf, buf3));
+ try file.seekTo(0);
// max_bytes < file_size
- testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1));
+ testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1));
}
test "directory operations on files" {
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index 2905a6af13..5877c77b5d 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -113,7 +113,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
.Array => hashArray(hasher, key, strat),
.Vector => |info| {
- if (info.child.bit_count % 8 == 0) {
+ if (std.meta.bitCount(info.child) % 8 == 0) {
// If there's no unused bits in the child type, we can just hash
// this as an array of bytes.
hasher.update(mem.asBytes(&key));
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index d6977f2f9c..6db1be539c 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -915,6 +915,10 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
testing.expect(slice.len == 10);
allocator.free(slice);
+
+ const zero_bit_ptr = try allocator.create(u0);
+ zero_bit_ptr.* = 0;
+ allocator.destroy(zero_bit_ptr);
}
pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
@@ -952,7 +956,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
- const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count));
+ const USizeShift = std.meta.Int(false, std.math.log2(std.meta.bitCount(usize)));
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
diff --git a/lib/std/io.zig b/lib/std/io.zig
index e30ed1fa92..3f02128a6c 100644
--- a/lib/std/io.zig
+++ b/lib/std/io.zig
@@ -169,6 +169,15 @@ pub const BitOutStream = BitWriter;
/// Deprecated: use `bitWriter`
pub const bitOutStream = bitWriter;
+pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream;
+pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream;
+
+pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
+pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;
+
+pub const FindByteOutStream = @import("io/find_byte_out_stream.zig").FindByteOutStream;
+pub const findByteOutStream = @import("io/find_byte_out_stream.zig").findByteOutStream;
+
pub const Packing = @import("io/serialization.zig").Packing;
pub const Serializer = @import("io/serialization.zig").Serializer;
diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig
new file mode 100644
index 0000000000..d08878e851
--- /dev/null
+++ b/lib/std/io/auto_indenting_stream.zig
@@ -0,0 +1,148 @@
+const std = @import("../std.zig");
+const io = std.io;
+const mem = std.mem;
+const assert = std.debug.assert;
+
+/// Automatically indents written data by keeping track of the
+/// current indentation level.
+pub fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
+ return struct {
+ const Self = @This();
+ pub const Error = UnderlyingWriter.Error;
+ pub const Writer = io.Writer(*Self, Error, write);
+
+ underlying_writer: UnderlyingWriter,
+
+ indent_count: usize = 0,
+ indent_delta: usize,
+ current_line_empty: bool = true,
+ indent_one_shot_count: usize = 0, // automatically popped when applied
+ applied_indent: usize = 0, // the most recently applied indent
+ indent_next_line: usize = 0, // not used until the next line
+
+ pub fn writer(self: *Self) Writer {
+ return .{ .context = self };
+ }
+
+ pub fn write(self: *Self, bytes: []const u8) Error!usize {
+ if (bytes.len == 0)
+ return @as(usize, 0);
+
+ try self.applyIndent();
+ return self.writeNoIndent(bytes);
+ }
+
+ // Change the indent delta without changing the final indentation level
+ pub fn setIndentDelta(self: *Self, indent_delta: usize) void {
+ if (self.indent_delta == indent_delta) {
+ return;
+ } else if (self.indent_delta > indent_delta) {
+ assert(self.indent_delta % indent_delta == 0);
+ self.indent_count = self.indent_count * (self.indent_delta / indent_delta);
+ } else {
+                // assert that the current indentation (in spaces) is a multiple of the new delta
+ assert((self.indent_count * self.indent_delta) % indent_delta == 0);
+ self.indent_count = self.indent_count / (indent_delta / self.indent_delta);
+ }
+ self.indent_delta = indent_delta;
+ }
+
+ fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
+ if (bytes.len == 0)
+ return @as(usize, 0);
+
+ try self.underlying_writer.writeAll(bytes);
+ if (bytes[bytes.len - 1] == '\n')
+ self.resetLine();
+ return bytes.len;
+ }
+
+ pub fn insertNewline(self: *Self) Error!void {
+ _ = try self.writeNoIndent("\n");
+ }
+
+ fn resetLine(self: *Self) void {
+ self.current_line_empty = true;
+ self.indent_next_line = 0;
+ }
+
+ /// Insert a newline unless the current line is blank
+ pub fn maybeInsertNewline(self: *Self) Error!void {
+ if (!self.current_line_empty)
+ try self.insertNewline();
+ }
+
+ /// Push default indentation
+ pub fn pushIndent(self: *Self) void {
+ // Doesn't actually write any indentation.
+ // Just primes the stream to be able to write the correct indentation if it needs to.
+ self.indent_count += 1;
+ }
+
+ /// Push an indent that is automatically popped after being applied
+ pub fn pushIndentOneShot(self: *Self) void {
+ self.indent_one_shot_count += 1;
+ self.pushIndent();
+ }
+
+ /// Turns all one-shot indents into regular indents
+ /// Returns number of indents that must now be manually popped
+ pub fn lockOneShotIndent(self: *Self) usize {
+ var locked_count = self.indent_one_shot_count;
+ self.indent_one_shot_count = 0;
+ return locked_count;
+ }
+
+ /// Push an indent that should not take effect until the next line
+ pub fn pushIndentNextLine(self: *Self) void {
+ self.indent_next_line += 1;
+ self.pushIndent();
+ }
+
+ pub fn popIndent(self: *Self) void {
+ assert(self.indent_count != 0);
+ self.indent_count -= 1;
+
+ if (self.indent_next_line > 0)
+ self.indent_next_line -= 1;
+ }
+
+ /// Writes ' ' bytes if the current line is empty
+ fn applyIndent(self: *Self) Error!void {
+ const current_indent = self.currentIndent();
+ if (self.current_line_empty and current_indent > 0) {
+ try self.underlying_writer.writeByteNTimes(' ', current_indent);
+ self.applied_indent = current_indent;
+ }
+
+ self.indent_count -= self.indent_one_shot_count;
+ self.indent_one_shot_count = 0;
+ self.current_line_empty = false;
+ }
+
+ /// Checks to see if the most recent indentation exceeds the currently pushed indents
+ pub fn isLineOverIndented(self: *Self) bool {
+ if (self.current_line_empty) return false;
+ return self.applied_indent > self.currentIndent();
+ }
+
+ fn currentIndent(self: *Self) usize {
+ var indent_current: usize = 0;
+ if (self.indent_count > 0) {
+ const indent_count = self.indent_count - self.indent_next_line;
+ indent_current = indent_count * self.indent_delta;
+ }
+ return indent_current;
+ }
+ };
+}
+
+pub fn autoIndentingStream(
+ indent_delta: usize,
+ underlying_writer: anytype,
+) AutoIndentingStream(@TypeOf(underlying_writer)) {
+ return AutoIndentingStream(@TypeOf(underlying_writer)){
+ .underlying_writer = underlying_writer,
+ .indent_delta = indent_delta,
+ };
+}
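// A minimal usage sketch of autoIndentingStream as defined above, indenting
// one line by a single 4-space level; the buffer size is arbitrary.
test "auto indenting sketch" {
    var buf: [32]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    var ais = autoIndentingStream(4, fbs.writer());
    try ais.writer().writeAll("outer\n");
    ais.pushIndent();
    try ais.writer().writeAll("inner\n");
    ais.popIndent();

    std.testing.expectEqualSlices(u8, "outer\n    inner\n", fbs.getWritten());
}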
diff --git a/lib/std/io/change_detection_stream.zig b/lib/std/io/change_detection_stream.zig
new file mode 100644
index 0000000000..5ba2bb3c10
--- /dev/null
+++ b/lib/std/io/change_detection_stream.zig
@@ -0,0 +1,55 @@
+const std = @import("../std.zig");
+const io = std.io;
+const mem = std.mem;
+const assert = std.debug.assert;
+
+/// Used to detect whether the data written to a stream differs from a source buffer
+pub fn ChangeDetectionStream(comptime WriterType: type) type {
+ return struct {
+ const Self = @This();
+ pub const Error = WriterType.Error;
+ pub const Writer = io.Writer(*Self, Error, write);
+
+ anything_changed: bool,
+ underlying_writer: WriterType,
+ source_index: usize,
+ source: []const u8,
+
+ pub fn writer(self: *Self) Writer {
+ return .{ .context = self };
+ }
+
+ fn write(self: *Self, bytes: []const u8) Error!usize {
+ if (!self.anything_changed) {
+ const end = self.source_index + bytes.len;
+ if (end > self.source.len) {
+ self.anything_changed = true;
+ } else {
+ const src_slice = self.source[self.source_index..end];
+ self.source_index += bytes.len;
+ if (!mem.eql(u8, bytes, src_slice)) {
+ self.anything_changed = true;
+ }
+ }
+ }
+
+ return self.underlying_writer.write(bytes);
+ }
+
+ pub fn changeDetected(self: *Self) bool {
+ return self.anything_changed or (self.source_index != self.source.len);
+ }
+ };
+}
+
+pub fn changeDetectionStream(
+ source: []const u8,
+ underlying_writer: anytype,
+) ChangeDetectionStream(@TypeOf(underlying_writer)) {
+ return ChangeDetectionStream(@TypeOf(underlying_writer)){
+ .anything_changed = false,
+ .underlying_writer = underlying_writer,
+ .source_index = 0,
+ .source = source,
+ };
+}
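// A minimal usage sketch of changeDetectionStream as defined above: a change
// is reported once the written bytes diverge from (or fail to cover) the
// source buffer. The fixed buffer only gives the wrapped writer somewhere to
// write to.
test "change detection sketch" {
    const source = "hello";
    var buf: [8]u8 = undefined;

    var fbs = std.io.fixedBufferStream(&buf);
    var same = changeDetectionStream(source, fbs.writer());
    try same.writer().writeAll("hello");
    std.testing.expect(!same.changeDetected());

    fbs.reset();
    var modified = changeDetectionStream(source, fbs.writer());
    try modified.writer().writeAll("hullo");
    std.testing.expect(modified.changeDetected());
}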
diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig
new file mode 100644
index 0000000000..b8689b7992
--- /dev/null
+++ b/lib/std/io/find_byte_out_stream.zig
@@ -0,0 +1,40 @@
+const std = @import("../std.zig");
+const io = std.io;
+const assert = std.debug.assert;
+
+/// An OutStream that records whether the given byte has been written to it.
+/// All data is forwarded unchanged to the underlying writer.
+pub fn FindByteOutStream(comptime UnderlyingWriter: type) type {
+ return struct {
+ const Self = @This();
+ pub const Error = UnderlyingWriter.Error;
+ pub const Writer = io.Writer(*Self, Error, write);
+
+ underlying_writer: UnderlyingWriter,
+ byte_found: bool,
+ byte: u8,
+
+ pub fn writer(self: *Self) Writer {
+ return .{ .context = self };
+ }
+
+ fn write(self: *Self, bytes: []const u8) Error!usize {
+ if (!self.byte_found) {
+ self.byte_found = blk: {
+ for (bytes) |b|
+ if (b == self.byte) break :blk true;
+ break :blk false;
+ };
+ }
+ return self.underlying_writer.write(bytes);
+ }
+ };
+}
+
+pub fn findByteOutStream(byte: u8, underlying_writer: anytype) FindByteOutStream(@TypeOf(underlying_writer)) {
+ return FindByteOutStream(@TypeOf(underlying_writer)){
+ .underlying_writer = underlying_writer,
+ .byte = byte,
+ .byte_found = false,
+ };
+}
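// A minimal usage sketch of findByteOutStream as defined above: writes are
// passed through while the stream records whether the requested byte was
// ever seen.
test "find byte sketch" {
    var buf: [32]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    var finder = findByteOutStream('\n', fbs.writer());
    try finder.writer().writeAll("no newline yet");
    std.testing.expect(!finder.byte_found);

    try finder.writer().writeAll("\n");
    std.testing.expect(finder.byte_found);
}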
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 2ab799046a..4090f5a476 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -198,28 +198,28 @@ pub fn Reader(
/// Reads a native-endian integer
pub fn readIntNative(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntNative(T, &bytes);
}
/// Reads a foreign-endian integer
pub fn readIntForeign(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntForeign(T, &bytes);
}
pub fn readIntLittle(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntLittle(T, &bytes);
}
pub fn readIntBig(self: Self, comptime T: type) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntBig(T, &bytes);
}
pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
- const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+ const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readInt(T, &bytes, endian);
}
diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig
index 4f8c149b47..925c929cee 100644
--- a/lib/std/io/serialization.zig
+++ b/lib/std/io/serialization.zig
@@ -60,7 +60,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
- const int_size = (U.bit_count + 7) / 8;
+ const int_size = (t_bit_count + 7) / 8;
if (packing == .Bit) {
const result = try self.in_stream.readBitsNoEof(U, t_bit_count);
@@ -73,7 +73,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
if (int_size == 1) {
if (t_bit_count == 8) return @bitCast(T, buffer[0]);
- const PossiblySignedByte = std.meta.Int(T.is_signed, 8);
+ const PossiblySignedByte = std.meta.Int(@typeInfo(T).Int.is_signed, 8);
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}
@@ -247,7 +247,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
- const int_size = (U.bit_count + 7) / 8;
+ const int_size = (t_bit_count + 7) / 8;
const u_value = @bitCast(U, value);
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index 39729ef0a2..770cd5f0fa 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -53,7 +53,7 @@ pub fn Writer(
/// Write a native-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeAll(&bytes);
}
@@ -61,28 +61,28 @@ pub fn Writer(
/// Write a foreign-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
- var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+ var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}
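// A small sketch of the writeInt helpers above: a u32 written big-endian
// lands in the buffer most significant byte first.
test "writeIntBig sketch" {
    var buf: [4]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    try fbs.writer().writeIntBig(u32, 0xDEADBEEF);
    std.testing.expectEqualSlices(u8, &[_]u8{ 0xDE, 0xAD, 0xBE, 0xEF }, fbs.getWritten());
}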
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 50bdfdc068..7b677f698a 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -127,6 +127,10 @@ fn log(
if (@enumToInt(message_level) <= @enumToInt(level)) {
if (@hasDecl(root, "log")) {
root.log(message_level, scope, format, args);
+ } else if (std.Target.current.os.tag == .freestanding) {
+ // On freestanding one must provide a log function; we do not have
+ // any I/O configured.
+ return;
} else if (builtin.mode != .ReleaseSmall) {
const held = std.debug.getStderrMutex().acquire();
defer held.release();
diff --git a/lib/std/math.zig b/lib/std/math.zig
index de9f5e349d..f05c967b2d 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -195,7 +195,7 @@ test "" {
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
- return switch (T.bit_count) {
+ return switch (@typeInfo(T).Float.bits) {
16 => 10,
32 => 23,
64 => 52,
@@ -208,7 +208,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int {
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
- return switch (T.bit_count) {
+ return switch (@typeInfo(T).Float.bits) {
16 => 5,
32 => 8,
64 => 11,
@@ -347,9 +347,9 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
- if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+ if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt < 0) {
return a >> casted_shift_amt;
}
@@ -373,9 +373,9 @@ test "math.shl" {
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
- if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+ if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt >= 0) {
return a >> casted_shift_amt;
} else {
@@ -400,11 +400,11 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
- if (T.is_signed) {
+ if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
- const ar = @mod(r, T.bit_count);
- return shr(T, x, ar) | shl(T, x, T.bit_count - ar);
+ const ar = @mod(r, @typeInfo(T).Int.bits);
+ return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar);
}
}
@@ -419,11 +419,11 @@ test "math.rotr" {
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
- if (T.is_signed) {
+ if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
- const ar = @mod(r, T.bit_count);
- return shl(T, x, ar) | shr(T, x, T.bit_count - ar);
+ const ar = @mod(r, @typeInfo(T).Int.bits);
+ return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar);
}
}
@@ -438,7 +438,7 @@ test "math.rotl" {
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count = 0;
- comptime var s = T.bit_count - 1;
+ comptime var s = @typeInfo(T).Int.bits - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
@@ -524,7 +524,7 @@ fn testOverflow() void {
pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
- comptime assert(T.is_signed); // must pass a signed integer to absInt
+ comptime assert(@typeInfo(T).Int.is_signed); // must pass a signed integer to absInt
if (x == minInt(@TypeOf(x))) {
return error.Overflow;
@@ -557,7 +557,7 @@ fn testAbsFloat() void {
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
@@ -578,7 +578,7 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
@@ -652,7 +652,7 @@ fn testDivCeil() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
- if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+ if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
@@ -757,10 +757,10 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
-pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
- if (@TypeOf(x).is_signed) return negate(x);
+pub fn negateCast(x: anytype) !std.meta.Int(true, std.meta.bitCount(@TypeOf(x))) {
+ if (@typeInfo(@TypeOf(x)).Int.is_signed) return negate(x);
- const int = std.meta.Int(true, @TypeOf(x).bit_count);
+ const int = std.meta.Int(true, std.meta.bitCount(@TypeOf(x)));
if (x > -minInt(int)) return error.Overflow;
if (x == -minInt(int)) return minInt(int);
@@ -823,7 +823,7 @@ pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;
comptime var i = 1;
- inline while (T.bit_count > i) : (i *= 2) {
+ inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
x |= (x >> i);
}
@@ -847,13 +847,13 @@ fn testFloorPowerOfTwo() void {
/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
-pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signed, T.bit_count + 1) {
+pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1) {
comptime assert(@typeInfo(T) == .Int);
- comptime assert(!T.is_signed);
+ comptime assert(!@typeInfo(T).Int.is_signed);
assert(value != 0);
- comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
+ comptime const PromotedType = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
- return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
+ return @as(PromotedType, 1) << @intCast(shiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@@ -861,9 +861,10 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signe
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int);
- comptime assert(!T.is_signed);
- comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
- comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
+ const info = @typeInfo(T).Int;
+ comptime assert(!info.is_signed);
+ comptime const PromotedType = std.meta.Int(info.is_signed, info.bits + 1);
+ comptime const overflowBit = @as(PromotedType, 1) << info.bits;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
return error.Overflow;
@@ -911,7 +912,7 @@ fn testCeilPowerOfTwo() !void {
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
- return @intCast(Log2Int(T), T.bit_count - 1 - @clz(T, x));
+ return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
}
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
@@ -1008,8 +1009,8 @@ test "max value type" {
testing.expect(x == 2147483647);
}
-pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(T.is_signed, T.bit_count * 2) {
- const ResultInt = std.meta.Int(T.is_signed, T.bit_count * 2);
+pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2) {
+ const ResultInt = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2);
return @as(ResultInt, a) * @as(ResultInt, b);
}
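// A few worked values for the math helpers touched above, collected as a
// small sketch; each line follows directly from the doc comments.
test "math helpers sketch" {
    std.testing.expectEqual(@as(u8, 0b0000_0011), std.math.rotl(u8, 0b1000_0001, @as(usize, 1)));
    std.testing.expectEqual(@as(u9, 256), std.math.ceilPowerOfTwoPromote(u8, 200));
    std.testing.expectError(error.Overflow, std.math.divTrunc(i8, -128, -1));
    std.testing.expectEqual(@as(i16, 2475), std.math.mulWide(i8, 25, 99));
}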
diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig
index 6246a4fb8b..03257e35ea 100644
--- a/lib/std/math/big.zig
+++ b/lib/std/math/big.zig
@@ -9,14 +9,15 @@ const assert = std.debug.assert;
pub const Rational = @import("big/rational.zig").Rational;
pub const int = @import("big/int.zig");
pub const Limb = usize;
-pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
-pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
+const limb_info = @typeInfo(Limb).Int;
+pub const DoubleLimb = std.meta.IntType(false, 2 * limb_info.bits);
+pub const SignedDoubleLimb = std.meta.IntType(true, 2 * limb_info.bits);
pub const Log2Limb = std.math.Log2Int(Limb);
comptime {
- assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
- assert(Limb.bit_count <= 64); // u128 set is unsupported
- assert(Limb.is_signed == false);
+ assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
+ assert(limb_info.bits <= 64); // u128 set is unsupported
+ assert(limb_info.is_signed == false);
}
test "" {
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 28da1064c9..19f6d0809e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -6,6 +6,7 @@
const std = @import("../../std.zig");
const math = std.math;
const Limb = std.math.big.Limb;
+const limb_bits = @typeInfo(Limb).Int.bits;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const Log2Limb = std.math.big.Log2Limb;
@@ -28,7 +29,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
},
.ComptimeInt => {
const w_value = if (scalar < 0) -scalar else scalar;
- return @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ return @divFloor(math.log2(w_value), limb_bits) + 1;
},
else => @compileError("parameter must be a primitive integer type"),
}
@@ -54,7 +55,7 @@ pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
}
pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize {
- return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base);
+ return (string_len + (limb_bits / base - 1)) / (limb_bits / base);
}
/// a + b * c + *carry, sets carry to the overflow bits
@@ -68,7 +69,7 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
- const c2 = @truncate(Limb, bc >> Limb.bit_count);
+ const c2 = @truncate(Limb, bc >> limb_bits);
// r1 = r1 + r2
const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
@@ -181,7 +182,7 @@ pub const Mutable = struct {
switch (@typeInfo(T)) {
.Int => |info| {
- const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T;
+ const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T;
const needed_limbs = @sizeOf(UT) / @sizeOf(Limb);
assert(needed_limbs <= self.limbs.len); // value too big
@@ -190,7 +191,7 @@ pub const Mutable = struct {
var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);
- if (info.bits <= Limb.bit_count) {
+ if (info.bits <= limb_bits) {
self.limbs[0] = @as(Limb, w_value);
self.len += 1;
} else {
@@ -200,15 +201,15 @@ pub const Mutable = struct {
self.len += 1;
// TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
- w_value >>= Limb.bit_count / 2;
- w_value >>= Limb.bit_count / 2;
+ w_value >>= limb_bits / 2;
+ w_value >>= limb_bits / 2;
}
}
},
.ComptimeInt => {
comptime var w_value = if (value < 0) -value else value;
- const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ const req_limbs = @divFloor(math.log2(w_value), limb_bits) + 1;
assert(req_limbs <= self.limbs.len); // value too big
self.len = req_limbs;
@@ -217,14 +218,14 @@ pub const Mutable = struct {
if (w_value <= maxInt(Limb)) {
self.limbs[0] = w_value;
} else {
- const mask = (1 << Limb.bit_count) - 1;
+ const mask = (1 << limb_bits) - 1;
comptime var i = 0;
inline while (w_value != 0) : (i += 1) {
self.limbs[i] = w_value & mask;
- w_value >>= Limb.bit_count / 2;
- w_value >>= Limb.bit_count / 2;
+ w_value >>= limb_bits / 2;
+ w_value >>= limb_bits / 2;
}
}
},
@@ -506,7 +507,7 @@ pub const Mutable = struct {
/// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void {
llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
- r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1);
+ r.normalize(a.limbs.len + (shift / limb_bits) + 1);
r.positive = a.positive;
}
@@ -516,7 +517,7 @@ pub const Mutable = struct {
/// Asserts there is enough memory to fit the result. The upper bound Limb count is
/// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void {
- if (a.limbs.len <= shift / Limb.bit_count) {
+ if (a.limbs.len <= shift / limb_bits) {
r.len = 1;
r.positive = true;
r.limbs[0] = 0;
@@ -524,7 +525,7 @@ pub const Mutable = struct {
}
const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
- r.len = a.limbs.len - (shift / Limb.bit_count);
+ r.len = a.limbs.len - (shift / limb_bits);
r.positive = a.positive;
}
@@ -772,7 +773,7 @@ pub const Mutable = struct {
}
if (ab_zero_limb_count != 0) {
- rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count);
+ rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits);
}
}
@@ -803,10 +804,10 @@ pub const Mutable = struct {
};
tmp.limbs[0] = 0;
- // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even
+ // Normalize so y > limb_bits / 2 (i.e. leading bit is set) and even
var norm_shift = @clz(Limb, y.limbs[y.len - 1]);
if (norm_shift == 0 and y.toConst().isOdd()) {
- norm_shift = Limb.bit_count;
+ norm_shift = limb_bits;
}
x.shiftLeft(x.toConst(), norm_shift);
y.shiftLeft(y.toConst(), norm_shift);
@@ -820,7 +821,7 @@ pub const Mutable = struct {
mem.set(Limb, q.limbs[0..q.len], 0);
// 2.
- tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t));
+ tmp.shiftLeft(y.toConst(), limb_bits * (n - t));
while (x.toConst().order(tmp.toConst()) != .lt) {
q.limbs[n - t] += 1;
x.sub(x.toConst(), tmp.toConst());
@@ -833,7 +834,7 @@ pub const Mutable = struct {
if (x.limbs[i] == y.limbs[t]) {
q.limbs[i - t - 1] = maxInt(Limb);
} else {
- const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]);
+ const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
}
@@ -862,11 +863,11 @@ pub const Mutable = struct {
// 3.3
tmp.set(q.limbs[i - t - 1]);
tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator);
- tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1));
+ tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1));
x.sub(x.toConst(), tmp.toConst());
if (!x.positive) {
- tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1));
+ tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1));
x.add(x.toConst(), tmp.toConst());
q.limbs[i - t - 1] -= 1;
}
@@ -949,7 +950,7 @@ pub const Const = struct {
/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
- return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1]));
+ return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
}
/// Returns the number of bits required to represent the integer in twos-complement form.
@@ -1019,10 +1020,10 @@ pub const Const = struct {
/// Returns an error if self cannot be narrowed into the requested type without truncation.
pub fn to(self: Const, comptime T: type) ConvertError!T {
switch (@typeInfo(T)) {
- .Int => {
- const UT = std.meta.Int(false, T.bit_count);
+ .Int => |info| {
+ const UT = std.meta.Int(false, info.bits);
- if (self.bitCountTwosComp() > T.bit_count) {
+ if (self.bitCountTwosComp() > info.bits) {
return error.TargetTooSmall;
}
@@ -1033,12 +1034,12 @@ pub const Const = struct {
} else {
for (self.limbs[0..self.limbs.len]) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
- r <<= Limb.bit_count;
+ r <<= limb_bits;
r |= limb;
}
}
- if (!T.is_signed) {
+ if (!info.is_signed) {
return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
} else {
if (self.positive) {
@@ -1149,7 +1150,7 @@ pub const Const = struct {
outer: for (self.limbs[0..self.limbs.len]) |limb| {
var shift: usize = 0;
- while (shift < Limb.bit_count) : (shift += base_shift) {
+ while (shift < limb_bits) : (shift += base_shift) {
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
const ch = std.fmt.digitToChar(r, uppercase);
string[digits_len] = ch;
@@ -1295,7 +1296,7 @@ pub const Const = struct {
/// Memory is allocated as needed to ensure operations never overflow. The range
/// is bounded only by available memory.
pub const Managed = struct {
- pub const sign_bit: usize = 1 << (usize.bit_count - 1);
+ pub const sign_bit: usize = 1 << (@typeInfo(usize).Int.bits - 1);
/// Default number of limbs to allocate on creation of a `Managed`.
pub const default_capacity = 4;
@@ -1448,7 +1449,7 @@ pub const Managed = struct {
for (self.limbs[0..self.len()]) |limb| {
std.debug.warn("{x} ", .{limb});
}
- std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive });
+ std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.isPositive() });
}
/// Negate the sign.
@@ -1716,7 +1717,7 @@ pub const Managed = struct {
/// r = a << shift, in other words, r = a * 2^shift
pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void {
- try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1);
+ try r.ensureCapacity(a.len() + (shift / limb_bits) + 1);
var m = r.toMutable();
m.shiftLeft(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -1724,13 +1725,13 @@ pub const Managed = struct {
/// r = a >> shift
pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void {
- if (a.len() <= shift / Limb.bit_count) {
+ if (a.len() <= shift / limb_bits) {
r.metadata = 1;
r.limbs[0] = 0;
return;
}
- try r.ensureCapacity(a.len() - (shift / Limb.bit_count));
+ try r.ensureCapacity(a.len() - (shift / limb_bits));
var m = r.toMutable();
m.shiftRight(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -2021,7 +2022,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
rem.* = 0;
for (a) |_, ri| {
const i = a.len - ri - 1;
- const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]);
+ const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);
if (pdiv == 0) {
quo[i] = 0;
@@ -2042,10 +2043,10 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
- assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+ assert(r.len >= a.len + (shift / limb_bits) + 1);
- const limb_shift = shift / Limb.bit_count + 1;
- const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+ const limb_shift = shift / limb_bits + 1;
+ const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@@ -2057,7 +2058,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
Limb,
src_digit,
- Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ limb_bits - @intCast(Limb, interior_limb_shift),
});
carry = (src_digit << interior_limb_shift);
}
@@ -2069,10 +2070,10 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
- assert(r.len >= a.len - (shift / Limb.bit_count));
+ assert(r.len >= a.len - (shift / limb_bits));
- const limb_shift = shift / Limb.bit_count;
- const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+ const limb_shift = shift / limb_bits;
+ const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@@ -2085,7 +2086,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
carry = @call(.{ .modifier = .always_inline }, math.shl, .{
Limb,
src_digit,
- Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ limb_bits - @intCast(Limb, interior_limb_shift),
});
}
}
@@ -2135,7 +2136,7 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
const A_is_positive = A >= 0;
const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
storage[0] = @truncate(Limb, Au);
- storage[1] = @truncate(Limb, Au >> Limb.bit_count);
+ storage[1] = @truncate(Limb, Au >> limb_bits);
return .{
.limbs = storage[0..2],
.positive = A_is_positive,
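// Illustrative sketch (not part of the patch): the mechanical change throughout
// big/int.zig is replacing the removed `Limb.bit_count` field with a `limb_bits`
// constant derived from @typeInfo. Minimal, self-contained version of the idea,
// assuming Limb is usize as in std.math.big:
const std = @import("std");
const Limb = usize;
const limb_bits = @typeInfo(Limb).Int.bits;

test "limb_bits equals the limb's bit size" {
    std.testing.expect(limb_bits == @bitSizeOf(Limb));
}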
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 5931767a82..9de93e94ac 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -23,13 +23,13 @@ test "big.int comptime_int set" {
var a = try Managed.initSet(testing.allocator, s);
defer a.deinit();
- const s_limb_count = 128 / Limb.bit_count;
+ const s_limb_count = 128 / @typeInfo(Limb).Int.bits;
comptime var i: usize = 0;
inline while (i < s_limb_count) : (i += 1) {
const result = @as(Limb, s & maxInt(Limb));
- s >>= Limb.bit_count / 2;
- s >>= Limb.bit_count / 2;
+ s >>= @typeInfo(Limb).Int.bits / 2;
+ s >>= @typeInfo(Limb).Int.bits / 2;
testing.expect(a.limbs[i] == result);
}
}
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 5b3c105718..d75a7b599c 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -136,7 +136,7 @@ pub const Rational = struct {
// Translated from golang.go/src/math/big/rat.go.
debug.assert(@typeInfo(T) == .Float);
- const UnsignedInt = std.meta.Int(false, T.bit_count);
+ const UnsignedInt = std.meta.Int(false, @typeInfo(T).Float.bits);
const f_bits = @bitCast(UnsignedInt, f);
const exponent_bits = math.floatExponentBits(T);
@@ -194,8 +194,8 @@ pub const Rational = struct {
// TODO: Indicate whether the result is not exact.
debug.assert(@typeInfo(T) == .Float);
- const fsize = T.bit_count;
- const BitReprType = std.meta.Int(false, T.bit_count);
+ const fsize = @typeInfo(T).Float.bits;
+ const BitReprType = std.meta.Int(false, fsize);
const msize = math.floatMantissaBits(T);
const msize1 = msize + 1;
@@ -475,16 +475,18 @@ pub const Rational = struct {
fn extractLowBits(a: Int, comptime T: type) T {
testing.expect(@typeInfo(T) == .Int);
- if (T.bit_count <= Limb.bit_count) {
+ const t_bits = @typeInfo(T).Int.bits;
+ const limb_bits = @typeInfo(Limb).Int.bits;
+ if (t_bits <= limb_bits) {
return @truncate(T, a.limbs[0]);
} else {
var r: T = 0;
comptime var i: usize = 0;
- // Remainder is always 0 since if T.bit_count >= Limb.bit_count -> Limb | T and both
+ // Remainder is always 0 since if t_bits >= limb_bits -> Limb | T and both
// are powers of two.
- inline while (i < T.bit_count / Limb.bit_count) : (i += 1) {
- r |= math.shl(T, a.limbs[i], i * Limb.bit_count);
+ inline while (i < t_bits / limb_bits) : (i += 1) {
+ r |= math.shl(T, a.limbs[i], i * limb_bits);
}
return r;
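// Illustrative sketch (not part of the patch): the same low-bit extraction as
// extractLowBits above, written against a plain limb slice so the
// t_bits / limb_bits arithmetic can be read in isolation. Limb is assumed to be
// usize and the function name is illustrative.
const std = @import("std");
const math = std.math;
const Limb = usize;

fn lowBitsFromLimbs(comptime T: type, limbs: []const Limb) T {
    const t_bits = @typeInfo(T).Int.bits;
    const limb_bits = @typeInfo(Limb).Int.bits;
    if (t_bits <= limb_bits) return @truncate(T, limbs[0]);
    var r: T = 0;
    comptime var i: usize = 0;
    // Both widths are powers of two, so limb_bits divides t_bits exactly.
    inline while (i < t_bits / limb_bits) : (i += 1) {
        r |= math.shl(T, limbs[i], i * limb_bits);
    }
    return r;
}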
diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig
index 3d282c82e1..54d08d12ca 100644
--- a/lib/std/math/cos.zig
+++ b/lib/std/math/cos.zig
@@ -49,7 +49,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn cos_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (math.isNan(x) or math.isInf(x)) {
diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig
index 30b52acbda..66a371fc3e 100644
--- a/lib/std/math/pow.zig
+++ b/lib/std/math/pow.zig
@@ -128,7 +128,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
if (yf != 0 and x < 0) {
return math.nan(T);
}
- if (yi >= 1 << (T.bit_count - 1)) {
+ if (yi >= 1 << (@typeInfo(T).Float.bits - 1)) {
return math.exp(y * math.ln(x));
}
@@ -150,7 +150,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
- var i = @floatToInt(std.meta.Int(true, T.bit_count), yi);
+ var i = @floatToInt(std.meta.Int(true, @typeInfo(T).Float.bits), yi);
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig
index c7db4f8623..c4a330df5d 100644
--- a/lib/std/math/sin.zig
+++ b/lib/std/math/sin.zig
@@ -50,7 +50,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn sin_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {
diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig
index 34851ca647..1c0b15c3de 100644
--- a/lib/std/math/sqrt.zig
+++ b/lib/std/math/sqrt.zig
@@ -36,10 +36,10 @@ pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) {
}
}
-fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
+fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, @typeInfo(T).Int.bits / 2) {
var op = value;
var res: T = 0;
- var one: T = 1 << (T.bit_count - 2);
+ var one: T = 1 << (@typeInfo(T).Int.bits - 2);
// "one" starts at the highest power of four <= than the argument.
while (one > op) {
@@ -55,7 +55,7 @@ fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
one >>= 2;
}
- const ResultType = std.meta.Int(false, T.bit_count / 2);
+ const ResultType = std.meta.Int(false, @typeInfo(T).Int.bits / 2);
return @intCast(ResultType, res);
}
diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig
index 5e5a80e15d..358eb8a380 100644
--- a/lib/std/math/tan.zig
+++ b/lib/std/math/tan.zig
@@ -43,7 +43,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn tan_(comptime T: type, x_: T) T {
- const I = std.meta.Int(true, T.bit_count);
+ const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {
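// Illustrative sketch (not part of the patch): the pattern used in cos/sin/tan
// above, picking a signed integer with the same bit width as the float so the
// argument-reduction quotient fits. Self-contained check of the idea:
const std = @import("std");

fn SameWidthInt(comptime T: type) type {
    return std.meta.Int(true, @typeInfo(T).Float.bits);
}

test "same-width signed integer for a float" {
    std.testing.expect(SameWidthInt(f32) == i32);
    std.testing.expect(SameWidthInt(f64) == i64);
}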
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 71190069a8..b10c318635 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -949,7 +949,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
-pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
+pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @ptrCast(*align(1) const T, bytes).*;
}
@@ -957,7 +957,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
-pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
+pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @byteSwap(T, readIntNative(T, bytes));
}
@@ -971,18 +971,18 @@ pub const readIntBig = switch (builtin.endian) {
.Big => readIntNative,
};
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
- const n = @divExact(T.bit_count, 8);
+ const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readIntNative(T, bytes[0..n]);
}
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
@@ -1003,7 +1003,7 @@ pub const readIntSliceBig = switch (builtin.endian) {
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
-pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T {
+pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: builtin.Endian) T {
if (endian == builtin.endian) {
return readIntNative(T, bytes);
} else {
@@ -1011,11 +1011,11 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, en
}
}
-/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
+/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
- const n = @divExact(T.bit_count, 8);
+ const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readInt(T, bytes[0..n], endian);
}
@@ -1060,7 +1060,7 @@ test "readIntBig and readIntLittle" {
/// accepts any integer bit width.
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
-pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void {
+pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
@ptrCast(*align(1) T, buf).* = value;
}
@@ -1068,7 +1068,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value:
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
-pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void {
+pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
writeIntNative(T, buf, @byteSwap(T, value));
}
@@ -1085,7 +1085,7 @@ pub const writeIntBig = switch (builtin.endian) {
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
-pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void {
+pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: builtin.Endian) void {
if (endian == builtin.endian) {
return writeIntNative(T, buffer, value);
} else {
@@ -1094,19 +1094,19 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value:
}
/// Writes a twos-complement little-endian integer to memory.
-/// Asserts that buf.len >= T.bit_count / 8.
+/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
- assert(buffer.len >= @divExact(T.bit_count, 8));
+ assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
- if (T.bit_count == 0)
+ if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
- const uint = std.meta.Int(false, T.bit_count);
+ const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);
@@ -1115,18 +1115,18 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
}
/// Writes a twos-complement big-endian integer to memory.
-/// Asserts that buffer.len >= T.bit_count / 8.
+/// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
- assert(buffer.len >= @divExact(T.bit_count, 8));
+ assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
- if (T.bit_count == 0)
+ if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
- const uint = std.meta.Int(false, T.bit_count);
+ const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {
@@ -1147,13 +1147,13 @@ pub const writeIntSliceForeign = switch (builtin.endian) {
};
/// Writes a twos-complement integer to memory, with the specified endianness.
-/// Asserts that buf.len >= T.bit_count / 8.
+/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
/// use writeInt instead.
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void {
- comptime assert(T.bit_count % 8 == 0);
+ comptime assert(@typeInfo(T).Int.bits % 8 == 0);
return switch (endian) {
.Little => writeIntSliceLittle(T, buffer, value),
.Big => writeIntSliceBig(T, buffer, value),
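// Illustrative sketch (not part of the patch): round-tripping a value through
// the fixed-size-buffer readInt/writeInt shown above. The buffer length is
// @divExact(@typeInfo(T).Int.bits, 8), so a u32 needs exactly four bytes.
const std = @import("std");

test "writeInt/readInt round trip" {
    var buf: [@divExact(@typeInfo(u32).Int.bits, 8)]u8 = undefined;
    std.mem.writeInt(u32, &buf, 0x12345678, .Little);
    std.testing.expectEqual(@as(u32, 0x12345678), std.mem.readInt(u32, &buf, .Little));
}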
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index bb59de2a7e..326a73b915 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -159,7 +159,7 @@ fn moveBytes(
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
- if (@sizeOf(T) == 0) return &(T{});
+ if (@sizeOf(T) == 0) return @as(*T, undefined);
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
}
@@ -167,11 +167,11 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
- const T = @TypeOf(ptr).Child;
+ const info = @typeInfo(@TypeOf(ptr)).Pointer;
+ const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
- _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
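// Illustrative sketch (not part of the patch): typical create/destroy usage the
// change above affects. destroy now takes the child type and alignment from the
// pointer's type info, and create of a zero-sized type returns an undefined
// pointer (still fine to pass to destroy) instead of the address of an empty
// struct literal.
const std = @import("std");

test "create and destroy" {
    const allocator = std.testing.allocator;
    const p = try allocator.create(u64);
    defer allocator.destroy(p);
    p.* = 42;
    std.testing.expectEqual(@as(u64, 42), p.*);
}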
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index aaa8e7ca78..73e0661498 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -705,34 +705,34 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
pub fn cast(comptime DestType: type, target: anytype) DestType {
const TargetType = @TypeOf(target);
switch (@typeInfo(DestType)) {
- .Pointer => {
+ .Pointer => |dest_ptr| {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
- return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
- return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target));
+ return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
}
},
else => {},
}
},
- .Optional => |opt| {
- if (@typeInfo(opt.child) == .Pointer) {
+ .Optional => |dest_opt| {
+ if (@typeInfo(dest_opt.child) == .Pointer) {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
- .Pointer => |ptr| {
- return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ .Pointer => {
+ return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
},
.Optional => |target_opt| {
if (@typeInfo(target_opt.child) == .Pointer) {
- return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target));
+ return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
}
},
else => {},
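// Illustrative sketch (not part of the patch): what the corrected captures above
// mean in practice. When converting pointer to pointer, cast() aligns to the
// destination pointer's alignment, and integers are converted via @intToPtr.
const std = @import("std");

test "meta.cast conversions" {
    var x: u32 = 0x12345678;
    const p = std.meta.cast(*u8, &x); // pointer -> pointer, dest alignment is 1
    const q = std.meta.cast(*u8, @ptrToInt(&x)); // integer -> pointer
    std.testing.expect(p == q);
}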
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 10e5b371f8..45d8f07f04 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1164,7 +1164,7 @@ fn linuxLookupNameFromDnsSearch(
}
const search = if (rc.search.isNull() or dots >= rc.ndots or mem.endsWith(u8, name, "."))
- &[_]u8{}
+ ""
else
rc.search.span();
@@ -1641,6 +1641,9 @@ pub const StreamServer = struct {
/// by the socket buffer limits, not by the system memory.
SystemResources,
+ /// Socket is not listening for new connections.
+ SocketNotListening,
+
ProtocolFailure,
/// Firewall rules forbid connection.
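// Illustrative sketch (not part of the patch): how a caller might react to the
// error added above. The wrapper name and logging are illustrative only.
const std = @import("std");

fn acceptOne(server: *std.net.StreamServer) !std.net.StreamServer.Connection {
    return server.accept() catch |err| switch (err) {
        // Surfaced when accept() is reached without a prior successful listen().
        error.SocketNotListening => {
            std.log.err("accept() called on a socket that is not listening", .{});
            return err;
        },
        else => return err,
    };
}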
diff --git a/lib/std/os.zig b/lib/std/os.zig
index abf72529ad..08021cdefe 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -2512,13 +2512,14 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
}
}
-pub const SetIdError = error{
- ResourceLimitReached,
+pub const SetEidError = error{
InvalidUserId,
PermissionDenied,
-} || UnexpectedError;
+};
+
+pub const SetIdError = error{ResourceLimitReached} || SetEidError || UnexpectedError;
-pub fn setuid(uid: u32) SetIdError!void {
+pub fn setuid(uid: uid_t) SetIdError!void {
switch (errno(system.setuid(uid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2528,7 +2529,16 @@ pub fn setuid(uid: u32) SetIdError!void {
}
}
-pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
+pub fn seteuid(uid: uid_t) SetEidError!void {
+ switch (errno(system.seteuid(uid))) {
+ 0 => return,
+ EINVAL => return error.InvalidUserId,
+ EPERM => return error.PermissionDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void {
switch (errno(system.setreuid(ruid, euid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2538,7 +2548,7 @@ pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
}
}
-pub fn setgid(gid: u32) SetIdError!void {
+pub fn setgid(gid: gid_t) SetIdError!void {
switch (errno(system.setgid(gid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2548,7 +2558,16 @@ pub fn setgid(gid: u32) SetIdError!void {
}
}
-pub fn setregid(rgid: u32, egid: u32) SetIdError!void {
+pub fn setegid(gid: gid_t) SetEidError!void {
+ switch (errno(system.setegid(gid))) {
+ 0 => return,
+ EINVAL => return error.InvalidUserId,
+ EPERM => return error.PermissionDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void {
switch (errno(system.setregid(rgid, egid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@@ -2815,6 +2834,9 @@ pub const AcceptError = error{
/// by the socket buffer limits, not by the system memory.
SystemResources,
+ /// Socket is not listening for new connections.
+ SocketNotListening,
+
ProtocolFailure,
/// Firewall rules forbid connection.
@@ -2884,21 +2906,21 @@ pub fn accept(
loop.waitUntilFdReadable(sock);
continue;
} else {
- return error.WouldBlock;
- },
- EBADF => unreachable, // always a race condition
- ECONNABORTED => return error.ConnectionAborted,
- EFAULT => unreachable,
- EINVAL => unreachable,
- ENOTSOCK => unreachable,
- EMFILE => return error.ProcessFdQuotaExceeded,
- ENFILE => return error.SystemFdQuotaExceeded,
- ENOBUFS => return error.SystemResources,
- ENOMEM => return error.SystemResources,
- EOPNOTSUPP => unreachable,
- EPROTO => return error.ProtocolFailure,
- EPERM => return error.BlockedByFirewall,
- else => |err| return unexpectedErrno(err),
+ return error.WouldBlock;
+ },
+ EBADF => unreachable, // always a race condition
+ ECONNABORTED => return error.ConnectionAborted,
+ EFAULT => unreachable,
+ EINVAL => return error.SocketNotListening,
+ ENOTSOCK => unreachable,
+ EMFILE => return error.ProcessFdQuotaExceeded,
+ ENFILE => return error.SystemFdQuotaExceeded,
+ ENOBUFS => return error.SystemResources,
+ ENOMEM => return error.SystemResources,
+ EOPNOTSUPP => unreachable,
+ EPROTO => return error.ProtocolFailure,
+ EPERM => return error.BlockedByFirewall,
+ else => |err| return unexpectedErrno(err),
}
}
} else unreachable;
@@ -4554,7 +4576,7 @@ pub fn res_mkquery(
// Make a reasonably unpredictable id
var ts: timespec = undefined;
clock_gettime(CLOCK_REALTIME, &ts) catch {};
- const UInt = std.meta.Int(false, @TypeOf(ts.tv_nsec).bit_count);
+ const UInt = std.meta.Int(false, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
const unsec = @bitCast(UInt, ts.tv_nsec);
const id = @truncate(u32, unsec + unsec / 65536);
q[0] = @truncate(u8, id / 256);
@@ -5404,3 +5426,71 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
else => |err| return std.os.unexpectedErrno(err),
}
}
+
+pub const SyncError = error{
+ InputOutput,
+ NoSpaceLeft,
+ DiskQuota,
+ AccessDenied,
+} || UnexpectedError;
+
+/// Write all pending file contents and metadata modifications to all filesystems.
+pub fn sync() void {
+ system.sync();
+}
+
+/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file.
+pub fn syncfs(fd: fd_t) SyncError!void {
+ const rc = system.syncfs(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
+
+/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
+pub fn fsync(fd: fd_t) SyncError!void {
+ if (std.Target.current.os.tag == .windows) {
+ if (windows.kernel32.FlushFileBuffers(fd) != 0)
+ return;
+ switch (windows.kernel32.GetLastError()) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => unreachable,
+ .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
+ .UNEXP_NET_ERR => return error.InputOutput,
+ else => return error.InputOutput,
+ }
+ }
+ const rc = system.fsync(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
+
+/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
+pub fn fdatasync(fd: fd_t) SyncError!void {
+ if (std.Target.current.os.tag == .windows) {
+ return fsync(fd) catch |err| switch (err) {
+ SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
+ else => return err,
+ };
+ }
+ const rc = system.fdatasync(fd);
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF, EINVAL, EROFS => unreachable,
+ EIO => return error.InputOutput,
+ ENOSPC => return error.NoSpaceLeft,
+ EDQUOT => return error.DiskQuota,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+}
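// Illustrative sketch (not part of the patch): typical use of the new sync
// wrappers, flushing a file's contents after writing. The helper name is
// illustrative; fsync covers data and metadata, while fdatasync may skip
// metadata such as timestamps.
const std = @import("std");

fn writeDurably(file: std.fs.File, bytes: []const u8) !void {
    try file.writeAll(bytes);
    try std.os.fsync(file.handle);
}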
diff --git a/lib/std/os/bits/darwin.zig b/lib/std/os/bits/darwin.zig
index 375127f278..ce73d2a6dc 100644
--- a/lib/std/os/bits/darwin.zig
+++ b/lib/std/os/bits/darwin.zig
@@ -7,9 +7,13 @@ const std = @import("../../std.zig");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
+// See: https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/sys/_types.h.auto.html
+// TODO: audit mode_t/pid_t, should likely be u16/i32
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const in_port_t = u16;
pub const sa_family_t = u8;
@@ -79,8 +83,8 @@ pub const Stat = extern struct {
mode: u16,
nlink: u16,
ino: ino_t,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
rdev: i32,
atimesec: isize,
atimensec: isize,
diff --git a/lib/std/os/bits/dragonfly.zig b/lib/std/os/bits/dragonfly.zig
index 8b6d6be212..1412aa5c41 100644
--- a/lib/std/os/bits/dragonfly.zig
+++ b/lib/std/os/bits/dragonfly.zig
@@ -9,10 +9,17 @@ const maxInt = std.math.maxInt;
pub fn S_ISCHR(m: u32) bool {
return m & S_IFMT == S_IFCHR;
}
+
+// See:
+// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
+// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
+// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
@@ -151,8 +158,8 @@ pub const Stat = extern struct {
dev: c_uint,
mode: c_ushort,
padding1: u16,
- uid: c_uint,
- gid: c_uint,
+ uid: uid_t,
+ gid: gid_t,
rdev: c_uint,
atim: timespec,
mtim: timespec,
@@ -511,7 +518,7 @@ pub const siginfo_t = extern struct {
si_errno: c_int,
si_code: c_int,
si_pid: c_int,
- si_uid: c_uint,
+ si_uid: uid_t,
si_status: c_int,
si_addr: ?*c_void,
si_value: union_sigval,
diff --git a/lib/std/os/bits/freebsd.zig b/lib/std/os/bits/freebsd.zig
index 22edf4b9d1..32936f7515 100644
--- a/lib/std/os/bits/freebsd.zig
+++ b/lib/std/os/bits/freebsd.zig
@@ -6,8 +6,12 @@
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
+// See https://svnweb.freebsd.org/base/head/sys/sys/_types.h?view=co
+// TODO: audit pid_t/mode_t. They should likely be i32 and u16, respectively
pub const fd_t = c_int;
pub const pid_t = c_int;
+pub const uid_t = u32;
+pub const gid_t = u32;
pub const mode_t = c_uint;
pub const socklen_t = u32;
@@ -128,8 +132,8 @@ pub const Stat = extern struct {
mode: u16,
__pad0: u16,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
__pad1: u32,
rdev: u64,
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 1327eaa330..6d85d06236 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -29,7 +29,7 @@ const is_mips = builtin.arch.isMIPS();
pub const pid_t = i32;
pub const fd_t = i32;
-pub const uid_t = i32;
+pub const uid_t = u32;
pub const gid_t = u32;
pub const clock_t = isize;
@@ -846,14 +846,14 @@ pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
-pub const empty_sigset = [_]u32{0} ** sigset_t.len;
+pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len;
pub const signalfd_siginfo = extern struct {
signo: u32,
errno: i32,
code: i32,
pid: u32,
- uid: u32,
+ uid: uid_t,
fd: i32,
tid: u32,
band: u32,
@@ -1491,10 +1491,10 @@ pub const Statx = extern struct {
nlink: u32,
/// User ID of owner
- uid: u32,
+ uid: uid_t,
/// Group ID of owner
- gid: u32,
+ gid: gid_t,
/// File type and mode
mode: u16,
diff --git a/lib/std/os/bits/linux/x86_64.zig b/lib/std/os/bits/linux/x86_64.zig
index 0800feeddf..0f01c40813 100644
--- a/lib/std/os/bits/linux/x86_64.zig
+++ b/lib/std/os/bits/linux/x86_64.zig
@@ -7,6 +7,7 @@
const std = @import("../../../std.zig");
const pid_t = linux.pid_t;
const uid_t = linux.uid_t;
+const gid_t = linux.gid_t;
const clock_t = linux.clock_t;
const stack_t = linux.stack_t;
const sigset_t = linux.sigset_t;
@@ -523,8 +524,8 @@ pub const Stat = extern struct {
nlink: usize,
mode: u32,
- uid: u32,
- gid: u32,
+ uid: uid_t,
+ gid: gid_t,
__pad0: u32,
rdev: u64,
size: off_t,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 13094b3a3a..8f697fb967 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -655,7 +655,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
-pub fn setuid(uid: u32) usize {
+pub fn setuid(uid: uid_t) usize {
if (@hasField(SYS, "setuid32")) {
return syscall1(.setuid32, uid);
} else {
@@ -663,7 +663,7 @@ pub fn setuid(uid: u32) usize {
}
}
-pub fn setgid(gid: u32) usize {
+pub fn setgid(gid: gid_t) usize {
if (@hasField(SYS, "setgid32")) {
return syscall1(.setgid32, gid);
} else {
@@ -671,7 +671,7 @@ pub fn setgid(gid: u32) usize {
}
}
-pub fn setreuid(ruid: u32, euid: u32) usize {
+pub fn setreuid(ruid: uid_t, euid: uid_t) usize {
if (@hasField(SYS, "setreuid32")) {
return syscall2(.setreuid32, ruid, euid);
} else {
@@ -679,7 +679,7 @@ pub fn setreuid(ruid: u32, euid: u32) usize {
}
}
-pub fn setregid(rgid: u32, egid: u32) usize {
+pub fn setregid(rgid: gid_t, egid: gid_t) usize {
if (@hasField(SYS, "setregid32")) {
return syscall2(.setregid32, rgid, egid);
} else {
@@ -687,47 +687,61 @@ pub fn setregid(rgid: u32, egid: u32) usize {
}
}
-pub fn getuid() u32 {
+pub fn getuid() uid_t {
if (@hasField(SYS, "getuid32")) {
- return @as(u32, syscall0(.getuid32));
+ return @as(uid_t, syscall0(.getuid32));
} else {
- return @as(u32, syscall0(.getuid));
+ return @as(uid_t, syscall0(.getuid));
}
}
-pub fn getgid() u32 {
+pub fn getgid() gid_t {
if (@hasField(SYS, "getgid32")) {
- return @as(u32, syscall0(.getgid32));
+ return @as(gid_t, syscall0(.getgid32));
} else {
- return @as(u32, syscall0(.getgid));
+ return @as(gid_t, syscall0(.getgid));
}
}
-pub fn geteuid() u32 {
+pub fn geteuid() uid_t {
if (@hasField(SYS, "geteuid32")) {
- return @as(u32, syscall0(.geteuid32));
+ return @as(uid_t, syscall0(.geteuid32));
} else {
- return @as(u32, syscall0(.geteuid));
+ return @as(uid_t, syscall0(.geteuid));
}
}
-pub fn getegid() u32 {
+pub fn getegid() gid_t {
if (@hasField(SYS, "getegid32")) {
- return @as(u32, syscall0(.getegid32));
+ return @as(gid_t, syscall0(.getegid32));
} else {
- return @as(u32, syscall0(.getegid));
+ return @as(gid_t, syscall0(.getegid));
}
}
-pub fn seteuid(euid: u32) usize {
- return setreuid(std.math.maxInt(u32), euid);
+pub fn seteuid(euid: uid_t) usize {
+ // We use setresuid here instead of setreuid to ensure that the saved uid
+ // is not changed. This is what musl and recent glibc versions do as well.
+ //
+ // The setresuid(2) man page says that if -1 is passed the corresponding
+ // id will not be changed. Since uid_t is unsigned, this wraps around to the
+ // max value in C.
+ comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed);
+ return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t));
}
-pub fn setegid(egid: u32) usize {
- return setregid(std.math.maxInt(u32), egid);
+pub fn setegid(egid: gid_t) usize {
+ // We use setresgid here instead of setregid to ensure that the saved gid
+ // is not changed. This is what musl and recent glibc versions do as well.
+ //
+ // The setresgid(2) man page says that if -1 is passed the corresponding
+ // id will not be changed. Since gid_t is unsigned, this wraps around to the
+ // max value in C.
+ comptime assert(@typeInfo(gid_t) == .Int and !@typeInfo(gid_t).Int.is_signed);
+ return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t));
}
-pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
+pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize {
if (@hasField(SYS, "getresuid32")) {
return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
} else {
@@ -735,7 +749,7 @@ pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
}
}
-pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
+pub fn getresgid(rgid: *gid_t, egid: *gid_t, sgid: *gid_t) usize {
if (@hasField(SYS, "getresgid32")) {
return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
} else {
@@ -743,7 +757,7 @@ pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
}
}
-pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
+pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) usize {
if (@hasField(SYS, "setresuid32")) {
return syscall3(.setresuid32, ruid, euid, suid);
} else {
@@ -751,7 +765,7 @@ pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
}
}
-pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
+pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) usize {
if (@hasField(SYS, "setresgid32")) {
return syscall3(.setresgid32, rgid, egid, sgid);
} else {
@@ -759,7 +773,7 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
}
}
-pub fn getgroups(size: usize, list: *u32) usize {
+pub fn getgroups(size: usize, list: *gid_t) usize {
if (@hasField(SYS, "getgroups32")) {
return syscall2(.getgroups32, size, @ptrToInt(list));
} else {
@@ -767,7 +781,7 @@ pub fn getgroups(size: usize, list: *u32) usize {
}
}
-pub fn setgroups(size: usize, list: *const u32) usize {
+pub fn setgroups(size: usize, list: *const gid_t) usize {
if (@hasField(SYS, "setgroups32")) {
return syscall2(.setgroups32, size, @ptrToInt(list));
} else {
@@ -815,17 +829,19 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return 0;
}
+const usize_bits = @typeInfo(usize).Int.bits;
+
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
- const shift = @intCast(u5, s & (usize.bit_count - 1));
+ const shift = @intCast(u5, s & (usize_bits - 1));
const val = @intCast(u32, 1) << shift;
- (set.*)[@intCast(usize, s) / usize.bit_count] |= val;
+ (set.*)[@intCast(usize, s) / usize_bits] |= val;
}
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
- return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
+ return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
@@ -1226,6 +1242,22 @@ pub fn bpf(cmd: BPF.Cmd, attr: *BPF.Attr, size: u32) usize {
return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size);
}
+pub fn sync() void {
+ _ = syscall0(.sync);
+}
+
+pub fn syncfs(fd: fd_t) usize {
+ return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
+}
+
+pub fn fsync(fd: fd_t) usize {
+ return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
+}
+
+pub fn fdatasync(fd: fd_t) usize {
+ return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
+}
+
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");
diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig
index 928c157c42..7c0dd0bb19 100644
--- a/lib/std/os/linux/bpf.zig
+++ b/lib/std/os/linux/bpf.zig
@@ -3,9 +3,13 @@
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
-usingnamespace std.os;
+usingnamespace std.os.linux;
const std = @import("../../std.zig");
+const errno = getErrno;
+const unexpectedErrno = std.os.unexpectedErrno;
const expectEqual = std.testing.expectEqual;
+const expectError = std.testing.expectError;
+const expect = std.testing.expect;
// instruction classes
pub const LD = 0x00;
@@ -62,6 +66,7 @@ pub const MAXINSNS = 4096;
// instruction classes
/// jmp mode in word width
pub const JMP32 = 0x06;
+
/// alu mode in double word width
pub const ALU64 = 0x07;
@@ -72,14 +77,17 @@ pub const XADD = 0xc0;
// alu/jmp fields
/// mov reg to reg
pub const MOV = 0xb0;
+
/// sign extending arithmetic shift right
pub const ARSH = 0xc0;
// change endianness of a register
/// flags for endianness conversion:
pub const END = 0xd0;
+
/// convert to little-endian
pub const TO_LE = 0x00;
+
/// convert to big-endian
pub const TO_BE = 0x08;
pub const FROM_LE = TO_LE;
@@ -88,29 +96,39 @@ pub const FROM_BE = TO_BE;
// jmp encodings
/// jump != *
pub const JNE = 0x50;
+
/// LT is unsigned, '<'
pub const JLT = 0xa0;
+
/// LE is unsigned, '<='
pub const JLE = 0xb0;
+
/// SGT is signed '>', GT in x86
pub const JSGT = 0x60;
+
/// SGE is signed '>=', GE in x86
pub const JSGE = 0x70;
+
/// SLT is signed, '<'
pub const JSLT = 0xc0;
+
/// SLE is signed, '<='
pub const JSLE = 0xd0;
+
/// function call
pub const CALL = 0x80;
+
/// function return
pub const EXIT = 0x90;
/// Flag for prog_attach command. If a sub-cgroup installs some bpf program, the
/// program in this cgroup yields to sub-cgroup program.
pub const F_ALLOW_OVERRIDE = 0x1;
+
/// Flag for prog_attach command. If a sub-cgroup installs some bpf program,
/// that cgroup program gets run in addition to the program in this cgroup.
pub const F_ALLOW_MULTI = 0x2;
+
/// Flag for prog_attach command.
pub const F_REPLACE = 0x4;
@@ -164,47 +182,61 @@ pub const PSEUDO_CALL = 1;
/// flag for BPF_MAP_UPDATE_ELEM command. create new element or update existing
pub const ANY = 0;
+
/// flag for BPF_MAP_UPDATE_ELEM command. create new element if it didn't exist
pub const NOEXIST = 1;
+
/// flag for BPF_MAP_UPDATE_ELEM command. update existing element
pub const EXIST = 2;
+
/// flag for BPF_MAP_UPDATE_ELEM command. spin_lock-ed map_lookup/map_update
pub const F_LOCK = 4;
/// flag for BPF_MAP_CREATE command
pub const BPF_F_NO_PREALLOC = 0x1;
+
/// flag for BPF_MAP_CREATE command. Instead of having one common LRU list in
/// the BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list which can
/// scale and perform better. Note, the LRU nodes (including free nodes) cannot
/// be moved across different LRU lists.
pub const BPF_F_NO_COMMON_LRU = 0x2;
+
/// flag for BPF_MAP_CREATE command. Specify numa node during map creation
pub const BPF_F_NUMA_NODE = 0x4;
+
/// flag for BPF_MAP_CREATE command. Flags for BPF object read access from
/// syscall side
pub const BPF_F_RDONLY = 0x8;
+
/// flag for BPF_MAP_CREATE command. Flags for BPF object write access from
/// syscall side
pub const BPF_F_WRONLY = 0x10;
+
/// flag for BPF_MAP_CREATE command. Flag for stack_map, store build_id+offset
/// instead of pointer
pub const BPF_F_STACK_BUILD_ID = 0x20;
+
/// flag for BPF_MAP_CREATE command. Zero-initialize hash function seed. This
/// should only be used for testing.
pub const BPF_F_ZERO_SEED = 0x40;
+
/// flag for BPF_MAP_CREATE command Flags for accessing BPF object from program
/// side.
pub const BPF_F_RDONLY_PROG = 0x80;
+
/// flag for BPF_MAP_CREATE command. Flags for accessing BPF object from program
/// side.
pub const BPF_F_WRONLY_PROG = 0x100;
+
/// flag for BPF_MAP_CREATE command. Clone map from listener for newly accepted
/// socket
pub const BPF_F_CLONE = 0x200;
+
/// flag for BPF_MAP_CREATE command. Enable memory-mapping BPF map
pub const BPF_F_MMAPABLE = 0x400;
-/// These values correspond to "syscalls" within the BPF program's environment
+/// These values correspond to "syscalls" within the BPF program's environment;
+/// each one is documented in std.os.linux.BPF.kern.
pub const Helper = enum(i32) {
unspec,
map_lookup_elem,
@@ -325,9 +357,34 @@ pub const Helper = enum(i32) {
tcp_send_ack,
send_signal_thread,
jiffies64,
+ read_branch_records,
+ get_ns_current_pid_tgid,
+ xdp_output,
+ get_netns_cookie,
+ get_current_ancestor_cgroup_id,
+ sk_assign,
+ ktime_get_boot_ns,
+ seq_printf,
+ seq_write,
+ sk_cgroup_id,
+ sk_ancestor_cgroup_id,
+ ringbuf_output,
+ ringbuf_reserve,
+ ringbuf_submit,
+ ringbuf_discard,
+ ringbuf_query,
+ csum_level,
+ skc_to_tcp6_sock,
+ skc_to_tcp_sock,
+ skc_to_tcp_timewait_sock,
+ skc_to_tcp_request_sock,
+ skc_to_udp6_sock,
+ get_task_stack,
_,
};
+// TODO: determine that this is the expected bit layout for both little and big
+// endian systems
/// a single BPF instruction
pub const Insn = packed struct {
code: u8,
@@ -340,19 +397,30 @@ pub const Insn = packed struct {
/// frame
pub const Reg = packed enum(u4) { r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10 };
const Source = packed enum(u1) { reg, imm };
+
+ const Mode = packed enum(u8) {
+ imm = IMM,
+ abs = ABS,
+ ind = IND,
+ mem = MEM,
+ len = LEN,
+ msh = MSH,
+ };
+
const AluOp = packed enum(u8) {
add = ADD,
sub = SUB,
mul = MUL,
div = DIV,
- op_or = OR,
- op_and = AND,
+ alu_or = OR,
+ alu_and = AND,
lsh = LSH,
rsh = RSH,
neg = NEG,
mod = MOD,
xor = XOR,
mov = MOV,
+ arsh = ARSH,
};
pub const Size = packed enum(u8) {
@@ -368,6 +436,13 @@ pub const Insn = packed struct {
jgt = JGT,
jge = JGE,
jset = JSET,
+ jlt = JLT,
+ jle = JLE,
+ jne = JNE,
+ jsgt = JSGT,
+ jsge = JSGE,
+ jslt = JSLT,
+ jsle = JSLE,
};
const ImmOrReg = union(Source) {
@@ -419,22 +494,100 @@ pub const Insn = packed struct {
return alu(64, .add, dst, src);
}
+ pub fn sub(dst: Reg, src: anytype) Insn {
+ return alu(64, .sub, dst, src);
+ }
+
+ pub fn mul(dst: Reg, src: anytype) Insn {
+ return alu(64, .mul, dst, src);
+ }
+
+ pub fn div(dst: Reg, src: anytype) Insn {
+ return alu(64, .div, dst, src);
+ }
+
+ pub fn alu_or(dst: Reg, src: anytype) Insn {
+ return alu(64, .alu_or, dst, src);
+ }
+
+ pub fn alu_and(dst: Reg, src: anytype) Insn {
+ return alu(64, .alu_and, dst, src);
+ }
+
+ pub fn lsh(dst: Reg, src: anytype) Insn {
+ return alu(64, .lsh, dst, src);
+ }
+
+ pub fn rsh(dst: Reg, src: anytype) Insn {
+ return alu(64, .rsh, dst, src);
+ }
+
+ pub fn neg(dst: Reg) Insn {
+ return alu(64, .neg, dst, 0);
+ }
+
+ pub fn mod(dst: Reg, src: anytype) Insn {
+ return alu(64, .mod, dst, src);
+ }
+
+ pub fn xor(dst: Reg, src: anytype) Insn {
+ return alu(64, .xor, dst, src);
+ }
+
+ pub fn arsh(dst: Reg, src: anytype) Insn {
+ return alu(64, .arsh, dst, src);
+ }
+
fn jmp(op: JmpOp, dst: Reg, src: anytype, off: i16) Insn {
return imm_reg(JMP | @enumToInt(op), dst, src, off);
}
+ pub fn ja(off: i16) Insn {
+ return jmp(.ja, .r0, 0, off);
+ }
+
pub fn jeq(dst: Reg, src: anytype, off: i16) Insn {
return jmp(.jeq, dst, src, off);
}
- pub fn stx_mem(size: Size, dst: Reg, src: Reg, off: i16) Insn {
- return Insn{
- .code = STX | @enumToInt(size) | MEM,
- .dst = @enumToInt(dst),
- .src = @enumToInt(src),
- .off = off,
- .imm = 0,
- };
+ pub fn jgt(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jgt, dst, src, off);
+ }
+
+ pub fn jge(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jge, dst, src, off);
+ }
+
+ pub fn jlt(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jlt, dst, src, off);
+ }
+
+ pub fn jle(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jle, dst, src, off);
+ }
+
+ pub fn jset(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jset, dst, src, off);
+ }
+
+ pub fn jne(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jne, dst, src, off);
+ }
+
+ pub fn jsgt(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jsgt, dst, src, off);
+ }
+
+ pub fn jsge(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jsge, dst, src, off);
+ }
+
+ pub fn jslt(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jslt, dst, src, off);
+ }
+
+ pub fn jsle(dst: Reg, src: anytype, off: i16) Insn {
+ return jmp(.jsle, dst, src, off);
}
pub fn xadd(dst: Reg, src: Reg) Insn {
@@ -447,17 +600,34 @@ pub const Insn = packed struct {
};
}
- /// direct packet access, R0 = *(uint *)(skb->data + imm32)
- pub fn ld_abs(size: Size, imm: i32) Insn {
+ fn ld(mode: Mode, size: Size, dst: Reg, src: Reg, imm: i32) Insn {
return Insn{
- .code = LD | @enumToInt(size) | ABS,
- .dst = 0,
- .src = 0,
+ .code = @enumToInt(mode) | @enumToInt(size) | LD,
+ .dst = @enumToInt(dst),
+ .src = @enumToInt(src),
.off = 0,
.imm = imm,
};
}
+ pub fn ld_abs(size: Size, dst: Reg, src: Reg, imm: i32) Insn {
+ return ld(.abs, size, dst, src, imm);
+ }
+
+ pub fn ld_ind(size: Size, dst: Reg, src: Reg, imm: i32) Insn {
+ return ld(.ind, size, dst, src, imm);
+ }
+
+ pub fn ldx(size: Size, dst: Reg, src: Reg, off: i16) Insn {
+ return Insn{
+ .code = MEM | @enumToInt(size) | LDX,
+ .dst = @enumToInt(dst),
+ .src = @enumToInt(src),
+ .off = off,
+ .imm = 0,
+ };
+ }
+
fn ld_imm_impl1(dst: Reg, src: Reg, imm: u64) Insn {
return Insn{
.code = LD | DW | IMM,
@@ -478,6 +648,14 @@ pub const Insn = packed struct {
};
}
+ pub fn ld_dw1(dst: Reg, imm: u64) Insn {
+ return ld_imm_impl1(dst, .r0, imm);
+ }
+
+ pub fn ld_dw2(imm: u64) Insn {
+ return ld_imm_impl2(imm);
+ }
+
pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn {
return ld_imm_impl1(dst, @intToEnum(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd));
}
@@ -486,6 +664,53 @@ pub const Insn = packed struct {
return ld_imm_impl2(@intCast(u64, map_fd));
}
+ pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn {
+ if (size == .double_word) @compileError("TODO: need to determine how to correctly handle double words");
+ return Insn{
+ .code = MEM | @enumToInt(size) | ST,
+ .dst = @enumToInt(dst),
+ .src = 0,
+ .off = off,
+ .imm = imm,
+ };
+ }
+
+ pub fn stx(size: Size, dst: Reg, off: i16, src: Reg) Insn {
+ return Insn{
+ .code = MEM | @enumToInt(size) | STX,
+ .dst = @enumToInt(dst),
+ .src = @enumToInt(src),
+ .off = off,
+ .imm = 0,
+ };
+ }
+
+ fn endian_swap(endian: std.builtin.Endian, comptime size: Size, dst: Reg) Insn {
+ return Insn{
+ .code = switch (endian) {
+ .Big => 0xdc,
+ .Little => 0xd4,
+ },
+ .dst = @enumToInt(dst),
+ .src = 0,
+ .off = 0,
+ .imm = switch (size) {
+ .byte => @compileError("can't swap a single byte"),
+ .half_word => 16,
+ .word => 32,
+ .double_word => 64,
+ },
+ };
+ }
+
+ pub fn le(comptime size: Size, dst: Reg) Insn {
+ return endian_swap(.Little, size, dst);
+ }
+
+ pub fn be(comptime size: Size, dst: Reg) Insn {
+ return endian_swap(.Big, size, dst);
+ }
+
pub fn call(helper: Helper) Insn {
return Insn{
.code = JMP | CALL,
@@ -508,95 +733,242 @@ pub const Insn = packed struct {
}
};
-fn expect_insn(insn: Insn, val: u64) void {
- expectEqual(@bitCast(u64, insn), val);
-}
-
test "insn bitsize" {
expectEqual(@bitSizeOf(Insn), 64);
}
-// mov instructions
-test "mov imm" {
- expect_insn(Insn.mov(.r1, 1), 0x00000001000001b7);
-}
-
-test "mov reg" {
- expect_insn(Insn.mov(.r6, .r1), 0x00000000000016bf);
-}
-
-// alu instructions
-test "add imm" {
- expect_insn(Insn.add(.r2, -4), 0xfffffffc00000207);
+fn expect_opcode(code: u8, insn: Insn) void {
+ expectEqual(code, insn.code);
}
-// ld instructions
-test "ld_abs" {
- expect_insn(Insn.ld_abs(.byte, 42), 0x0000002a00000030);
-}
-
-test "ld_map_fd" {
- expect_insn(Insn.ld_map_fd1(.r1, 42), 0x0000002a00001118);
- expect_insn(Insn.ld_map_fd2(42), 0x0000000000000000);
-}
-
-// st instructions
-test "stx_mem" {
- expect_insn(Insn.stx_mem(.word, .r10, .r0, -4), 0x00000000fffc0a63);
-}
-
-test "xadd" {
- expect_insn(Insn.xadd(.r0, .r1), 0x00000000000010db);
-}
-
-// jmp instructions
-test "jeq imm" {
- expect_insn(Insn.jeq(.r0, 0, 2), 0x0000000000020015);
-}
-
-// other instructions
-test "call" {
- expect_insn(Insn.call(.map_lookup_elem), 0x0000000100000085);
-}
-
-test "exit" {
- expect_insn(Insn.exit(), 0x0000000000000095);
+// The opcodes were grabbed from https://github.com/iovisor/bpf-docs/blob/master/eBPF.md
+test "opcodes" {
+ // instructions whose names end with 1 or 2 are consecutive pairs for
+ // loading 64-bit immediates (imm is only 32 bits wide)
+
+ // alu instructions
+ expect_opcode(0x07, Insn.add(.r1, 0));
+ expect_opcode(0x0f, Insn.add(.r1, .r2));
+ expect_opcode(0x17, Insn.sub(.r1, 0));
+ expect_opcode(0x1f, Insn.sub(.r1, .r2));
+ expect_opcode(0x27, Insn.mul(.r1, 0));
+ expect_opcode(0x2f, Insn.mul(.r1, .r2));
+ expect_opcode(0x37, Insn.div(.r1, 0));
+ expect_opcode(0x3f, Insn.div(.r1, .r2));
+ expect_opcode(0x47, Insn.alu_or(.r1, 0));
+ expect_opcode(0x4f, Insn.alu_or(.r1, .r2));
+ expect_opcode(0x57, Insn.alu_and(.r1, 0));
+ expect_opcode(0x5f, Insn.alu_and(.r1, .r2));
+ expect_opcode(0x67, Insn.lsh(.r1, 0));
+ expect_opcode(0x6f, Insn.lsh(.r1, .r2));
+ expect_opcode(0x77, Insn.rsh(.r1, 0));
+ expect_opcode(0x7f, Insn.rsh(.r1, .r2));
+ expect_opcode(0x87, Insn.neg(.r1));
+ expect_opcode(0x97, Insn.mod(.r1, 0));
+ expect_opcode(0x9f, Insn.mod(.r1, .r2));
+ expect_opcode(0xa7, Insn.xor(.r1, 0));
+ expect_opcode(0xaf, Insn.xor(.r1, .r2));
+ expect_opcode(0xb7, Insn.mov(.r1, 0));
+ expect_opcode(0xbf, Insn.mov(.r1, .r2));
+ expect_opcode(0xc7, Insn.arsh(.r1, 0));
+ expect_opcode(0xcf, Insn.arsh(.r1, .r2));
+
+ // atomic instructions: there may be more of these that aren't documented in the wild
+ expect_opcode(0xdb, Insn.xadd(.r1, .r2));
+
+ // TODO: byteswap instructions
+ expect_opcode(0xd4, Insn.le(.half_word, .r1));
+ expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm);
+ expect_opcode(0xd4, Insn.le(.word, .r1));
+ expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm);
+ expect_opcode(0xd4, Insn.le(.double_word, .r1));
+ expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm);
+ expect_opcode(0xdc, Insn.be(.half_word, .r1));
+ expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm);
+ expect_opcode(0xdc, Insn.be(.word, .r1));
+ expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm);
+ expect_opcode(0xdc, Insn.be(.double_word, .r1));
+ expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm);
+
+ // memory instructions
+ expect_opcode(0x18, Insn.ld_dw1(.r1, 0));
+ expect_opcode(0x00, Insn.ld_dw2(0));
+
+ // loading a map fd
+ expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0));
+ expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src);
+ expect_opcode(0x00, Insn.ld_map_fd2(0));
+
+ expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0));
+ expect_opcode(0x20, Insn.ld_abs(.word, .r1, .r2, 0));
+ expect_opcode(0x28, Insn.ld_abs(.half_word, .r1, .r2, 0));
+ expect_opcode(0x30, Insn.ld_abs(.byte, .r1, .r2, 0));
+
+ expect_opcode(0x58, Insn.ld_ind(.double_word, .r1, .r2, 0));
+ expect_opcode(0x40, Insn.ld_ind(.word, .r1, .r2, 0));
+ expect_opcode(0x48, Insn.ld_ind(.half_word, .r1, .r2, 0));
+ expect_opcode(0x50, Insn.ld_ind(.byte, .r1, .r2, 0));
+
+ expect_opcode(0x79, Insn.ldx(.double_word, .r1, .r2, 0));
+ expect_opcode(0x61, Insn.ldx(.word, .r1, .r2, 0));
+ expect_opcode(0x69, Insn.ldx(.half_word, .r1, .r2, 0));
+ expect_opcode(0x71, Insn.ldx(.byte, .r1, .r2, 0));
+
+ expect_opcode(0x62, Insn.st(.word, .r1, 0, 0));
+ expect_opcode(0x6a, Insn.st(.half_word, .r1, 0, 0));
+ expect_opcode(0x72, Insn.st(.byte, .r1, 0, 0));
+
+ expect_opcode(0x63, Insn.stx(.word, .r1, 0, .r2));
+ expect_opcode(0x6b, Insn.stx(.half_word, .r1, 0, .r2));
+ expect_opcode(0x73, Insn.stx(.byte, .r1, 0, .r2));
+ expect_opcode(0x7b, Insn.stx(.double_word, .r1, 0, .r2));
+
+ // branch instructions
+ expect_opcode(0x05, Insn.ja(0));
+ expect_opcode(0x15, Insn.jeq(.r1, 0, 0));
+ expect_opcode(0x1d, Insn.jeq(.r1, .r2, 0));
+ expect_opcode(0x25, Insn.jgt(.r1, 0, 0));
+ expect_opcode(0x2d, Insn.jgt(.r1, .r2, 0));
+ expect_opcode(0x35, Insn.jge(.r1, 0, 0));
+ expect_opcode(0x3d, Insn.jge(.r1, .r2, 0));
+ expect_opcode(0xa5, Insn.jlt(.r1, 0, 0));
+ expect_opcode(0xad, Insn.jlt(.r1, .r2, 0));
+ expect_opcode(0xb5, Insn.jle(.r1, 0, 0));
+ expect_opcode(0xbd, Insn.jle(.r1, .r2, 0));
+ expect_opcode(0x45, Insn.jset(.r1, 0, 0));
+ expect_opcode(0x4d, Insn.jset(.r1, .r2, 0));
+ expect_opcode(0x55, Insn.jne(.r1, 0, 0));
+ expect_opcode(0x5d, Insn.jne(.r1, .r2, 0));
+ expect_opcode(0x65, Insn.jsgt(.r1, 0, 0));
+ expect_opcode(0x6d, Insn.jsgt(.r1, .r2, 0));
+ expect_opcode(0x75, Insn.jsge(.r1, 0, 0));
+ expect_opcode(0x7d, Insn.jsge(.r1, .r2, 0));
+ expect_opcode(0xc5, Insn.jslt(.r1, 0, 0));
+ expect_opcode(0xcd, Insn.jslt(.r1, .r2, 0));
+ expect_opcode(0xd5, Insn.jsle(.r1, 0, 0));
+ expect_opcode(0xdd, Insn.jsle(.r1, .r2, 0));
+ expect_opcode(0x85, Insn.call(.unspec));
+ expect_opcode(0x95, Insn.exit());
}
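// Illustrative sketch (not part of the patch): the new constructors compose into
// whole programs. A two-instruction "return 0", checked with the expect_opcode
// helper defined above:
test "compose a minimal program" {
    const prog = [_]Insn{
        Insn.mov(.r0, 0),
        Insn.exit(),
    };
    expect_opcode(0xb7, prog[0]);
    expect_opcode(0x95, prog[1]);
}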
pub const Cmd = extern enum(usize) {
+ /// Create a map and return a file descriptor that refers to the map. The
+ /// close-on-exec file descriptor flag is automatically enabled for the new
+ /// file descriptor.
+ ///
+ /// uses MapCreateAttr
map_create,
+
+ /// Look up an element by key in a specified map and return its value.
+ ///
+ /// uses MapElemAttr
map_lookup_elem,
+
+ /// Create or update an element (key/value pair) in a specified map.
+ ///
+ /// uses MapElemAttr
map_update_elem,
+
+ /// Look up and delete an element by key in a specified map.
+ ///
+ /// uses MapElemAttr
map_delete_elem,
+
+ /// Look up an element by key in a specified map and return the key of the
+ /// next element.
map_get_next_key,
+
+ /// Verify and load an eBPF program, returning a new file descriptor
+ /// associated with the program. The close-on-exec file descriptor flag
+ /// is automatically enabled for the new file descriptor.
+ ///
+ /// uses ProgLoadAttr
prog_load,
+
+ /// Pin a map or eBPF program to a path within the minimal BPF filesystem
+ ///
+ /// uses ObjAttr
obj_pin,
+
+ /// Get the file descriptor of a BPF object pinned to a certain path
+ ///
+ /// uses ObjAttr
obj_get,
+
+ /// uses ProgAttachAttr
prog_attach,
+
+ /// uses ProgAttachAttr
prog_detach,
+
+ /// uses TestRunAttr
prog_test_run,
+
+ /// uses GetIdAttr
prog_get_next_id,
+
+ /// uses GetIdAttr
map_get_next_id,
+
+ /// uses GetIdAttr
prog_get_fd_by_id,
+
+ /// uses GetIdAttr
map_get_fd_by_id,
+
+ /// uses InfoAttr
obj_get_info_by_fd,
+
+ /// uses QueryAttr
prog_query,
+
+ /// uses RawTracepointAttr
raw_tracepoint_open,
+
+ /// uses BtfLoadAttr
btf_load,
+
+ /// uses GetIdAttr
btf_get_fd_by_id,
+
+ /// uses TaskFdQueryAttr
task_fd_query,
+
+ /// uses MapElemAttr
map_lookup_and_delete_elem,
map_freeze,
+
+ /// uses GetIdAttr
btf_get_next_id,
+
+ /// uses MapBatchAttr
map_lookup_batch,
+
+ /// uses MapBatchAttr
map_lookup_and_delete_batch,
+
+ /// uses MapBatchAttr
map_update_batch,
+
+ /// uses MapBatchAttr
map_delete_batch,
+
+ /// uses LinkCreateAttr
link_create,
+
+ /// uses LinkUpdateAttr
link_update,
+
+ /// uses GetIdAttr
link_get_fd_by_id,
+
+ /// uses GetIdAttr
link_get_next_id,
+
+ /// uses EnableStatsAttr
enable_stats,
+
+ /// uses IterCreateAttr
iter_create,
link_detach,
_,
@@ -630,42 +1002,138 @@ pub const MapType = extern enum(u32) {
sk_storage,
devmap_hash,
struct_ops,
+
+ /// An ordered and shared CPU version of perf_event_array. They have
+ /// similar semantics:
+ /// - variable length records
+ /// - no blocking: when full, reservation fails
+ /// - memory mappable for ease and speed
+ /// - epoll notifications for new data, but can busy poll
+ ///
+ /// Ringbufs give BPF programs two sets of APIs:
+ /// - ringbuf_output() allows copying data from one place to a ring
+ /// buffer, similar to bpf_perf_event_output()
+ /// - ringbuf_reserve()/ringbuf_commit()/ringbuf_discard() split the
+ /// process into two steps. First a fixed amount of space is reserved;
+ /// if that succeeds, the program gets a pointer to a chunk of memory
+ /// that can later be submitted with commit() or discarded with
+ /// discard().
+ ///
+ /// ringbuf_output() will incur an extra memory copy, but allows submitting
+ /// records whose length is not known beforehand, and is an easy
+ /// replacement for perf_event_output().
+ ///
+ /// ringbuf_reserve() avoids the extra memory copy but requires a known size
+ /// of memory beforehand.
+ ///
+ /// ringbuf_query() allows querying properties of the map; 4 are currently
+ /// supported:
+ /// - BPF_RB_AVAIL_DATA: amount of unconsumed data in ringbuf
+ /// - BPF_RB_RING_SIZE: returns size of ringbuf
+ /// - BPF_RB_CONS_POS/BPF_RB_PROD_POS returns current logical position
+ /// of consumer and producer respectively
+ ///
+ /// key size: 0
+ /// value size: 0
+ /// max entries: size of ringbuf, must be power of 2
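+ ///
+ /// For example (an illustrative sketch only: it uses the map_create()
+ /// wrapper added later in this file, and the 4096-byte size is an
+ /// arbitrary power of two):
+ ///
+ ///     const rb = try map_create(.ringbuf, 0, 0, 4096);
+ ///     defer std.os.close(rb);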
ringbuf,
+
_,
};
pub const ProgType = extern enum(u32) {
unspec,
+
+ /// context type: __sk_buff
socket_filter,
+
+ /// context type: bpf_user_pt_regs_t
kprobe,
+
+ /// context type: __sk_buff
sched_cls,
+
+ /// context type: __sk_buff
sched_act,
+
+ /// context type: u64
tracepoint,
+
+ /// context type: xdp_md
xdp,
+
+ /// context type: bpf_perf_event_data
perf_event,
+
+ /// context type: __sk_buff
cgroup_skb,
+
+ /// context type: bpf_sock
cgroup_sock,
+
+ /// context type: __sk_buff
lwt_in,
+
+ /// context type: __sk_buff
lwt_out,
+
+ /// context type: __sk_buff
lwt_xmit,
+
+ /// context type: bpf_sock_ops
sock_ops,
+
+ /// context type: __sk_buff
sk_skb,
+
+ /// context type: bpf_cgroup_dev_ctx
cgroup_device,
+
+ /// context type: sk_msg_md
sk_msg,
+
+ /// context type: bpf_raw_tracepoint_args
raw_tracepoint,
+
+ /// context type: bpf_sock_addr
cgroup_sock_addr,
+
+ /// context type: __sk_buff
lwt_seg6local,
+
+ /// context type: u32
lirc_mode2,
+
+ /// context type: sk_reuseport_md
sk_reuseport,
+
+ /// context type: __sk_buff
flow_dissector,
+
+ /// context type: bpf_sysctl
cgroup_sysctl,
+
+ /// context type: bpf_raw_tracepoint_args
raw_tracepoint_writable,
+
+ /// context type: bpf_sockopt
cgroup_sockopt,
+
+ /// context type: void *
tracing,
+
+ /// context type: void *
struct_ops,
+
+ /// context type: void *
ext,
+
+ /// context type: void *
lsm,
+
+ /// context type: bpf_sk_lookup
sk_lookup,
+ _,
};
pub const AttachType = extern enum(u32) {
@@ -715,27 +1183,38 @@ const obj_name_len = 16;
pub const MapCreateAttr = extern struct {
/// one of MapType
map_type: u32,
+
/// size of key in bytes
key_size: u32,
+
/// size of value in bytes
value_size: u32,
+
/// max number of entries in a map
max_entries: u32,
+
/// .map_create related flags
map_flags: u32,
+
/// fd pointing to the inner map
inner_map_fd: fd_t,
+
/// numa node (effective only if MapCreateFlags.numa_node is set)
numa_node: u32,
map_name: [obj_name_len]u8,
+
/// ifindex of netdev to create on
map_ifindex: u32,
+
/// fd pointing to a BTF type data
btf_fd: fd_t,
+
/// BTF type_id of the key
btf_key_type_id: u32,
+
/// BTF type_id of the value
bpf_value_type_id: u32,
+
/// BTF type_id of a kernel struct stored as the map value
btf_vmlinux_value_type_id: u32,
};
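+
+// A minimal sketch of driving this struct directly; it mirrors the
+// map_create() wrapper added at the bottom of this file (the sizes are
+// only illustrative):
+//
+//     var attr = Attr{ .map_create = std.mem.zeroes(MapCreateAttr) };
+//     attr.map_create.map_type = @enumToInt(MapType.hash);
+//     attr.map_create.key_size = 4;
+//     attr.map_create.value_size = 4;
+//     attr.map_create.max_entries = 32;
+//     const rc = bpf(.map_create, &attr, @sizeOf(MapCreateAttr));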
@@ -755,10 +1234,12 @@ pub const MapElemAttr = extern struct {
pub const MapBatchAttr = extern struct {
/// start batch, NULL to start from beginning
in_batch: u64,
+
/// output: next start batch
out_batch: u64,
keys: u64,
values: u64,
+
/// input/output:
/// input: # of key/value elements
/// output: # of filled elements
@@ -775,35 +1256,49 @@ pub const ProgLoadAttr = extern struct {
insn_cnt: u32,
insns: u64,
license: u64,
+
/// verbosity level of verifier
log_level: u32,
+
/// size of user buffer
log_size: u32,
+
/// user supplied buffer
log_buf: u64,
+
/// not used
kern_version: u32,
prog_flags: u32,
prog_name: [obj_name_len]u8,
- /// ifindex of netdev to prep for. For some prog types expected attach
- /// type must be known at load time to verify attach type specific parts
- /// of prog (context accesses, allowed helpers, etc).
+
+ /// ifindex of netdev to prep for.
prog_ifindex: u32,
+
+ /// For some prog types expected attach type must be known at load time to
+ /// verify attach type specific parts of prog (context accesses, allowed
+ /// helpers, etc).
expected_attach_type: u32,
+
/// fd pointing to BTF type data
prog_btf_fd: fd_t,
+
/// userspace bpf_func_info size
func_info_rec_size: u32,
func_info: u64,
+
/// number of bpf_func_info records
func_info_cnt: u32,
+
/// userspace bpf_line_info size
line_info_rec_size: u32,
line_info: u64,
+
/// number of bpf_line_info records
line_info_cnt: u32,
+
/// in-kernel BTF type id to attach to
attact_btf_id: u32,
+
/// 0 to attach to vmlinux
attach_prog_id: u32,
};
@@ -819,29 +1314,36 @@ pub const ObjAttr = extern struct {
pub const ProgAttachAttr = extern struct {
/// container object to attach to
target_fd: fd_t,
+
/// eBPF program to attach
attach_bpf_fd: fd_t,
+
attach_type: u32,
attach_flags: u32,
+
// TODO: BPF_F_REPLACE flags
/// previously attached eBPF program to replace if .replace is used
replace_bpf_fd: fd_t,
};
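+
+// There is no wrapper for Cmd.prog_attach in this file yet; a hedged sketch
+// of invoking it directly (cgroup_fd, prog_fd and attach_type are
+// placeholders, and the Attr union field is assumed to be named prog_attach):
+//
+//     var attr = Attr{ .prog_attach = std.mem.zeroes(ProgAttachAttr) };
+//     attr.prog_attach.target_fd = cgroup_fd;
+//     attr.prog_attach.attach_bpf_fd = prog_fd;
+//     attr.prog_attach.attach_type = @enumToInt(attach_type);
+//     const rc = bpf(.prog_attach, &attr, @sizeOf(ProgAttachAttr));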
/// struct used by Cmd.prog_test_run command
-pub const TestAttr = extern struct {
+pub const TestRunAttr = extern struct {
prog_fd: fd_t,
retval: u32,
+
/// input: len of data_in
data_size_in: u32,
+
/// input/output: len of data_out. returns ENOSPC if data_out is too small.
data_size_out: u32,
data_in: u64,
data_out: u64,
repeat: u32,
duration: u32,
+
/// input: len of ctx_in
ctx_size_in: u32,
+
/// input/output: len of ctx_out. returns ENOSPC if ctx_out is too small.
ctx_size_out: u32,
ctx_in: u64,
@@ -894,26 +1396,35 @@ pub const BtfLoadAttr = extern struct {
btf_log_level: u32,
};
+/// struct used by Cmd.task_fd_query
pub const TaskFdQueryAttr = extern struct {
/// input: pid
pid: pid_t,
+
/// input: fd
fd: fd_t,
+
/// input: flags
flags: u32,
+
/// input/output: buf len
buf_len: u32,
+
/// input/output:
/// tp_name for tracepoint
/// symbol for kprobe
/// filename for uprobe
buf: u64,
+
/// output: prog_id
prog_id: u32,
+
/// output: BPF_FD_TYPE
fd_type: u32,
+
/// output: probe_offset
probe_offset: u64,
+
/// output: probe_addr
probe_addr: u64,
};
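+
+// A hedged sketch of asking the kernel what a perf event fd is attached to
+// (target_pid and perf_fd are placeholders, and the Attr union field is
+// assumed to be named task_fd_query):
+//
+//     var name_buf: [128]u8 = undefined;
+//     var attr = Attr{ .task_fd_query = std.mem.zeroes(TaskFdQueryAttr) };
+//     attr.task_fd_query.pid = target_pid;
+//     attr.task_fd_query.fd = perf_fd;
+//     attr.task_fd_query.buf = @ptrToInt(&name_buf);
+//     attr.task_fd_query.buf_len = name_buf.len;
+//     const rc = bpf(.task_fd_query, &attr, @sizeOf(TaskFdQueryAttr));
+//     // on success, attr.task_fd_query.prog_id and .fd_type are filled in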
@@ -922,9 +1433,11 @@ pub const TaskFdQueryAttr = extern struct {
pub const LinkCreateAttr = extern struct {
/// eBPF program to attach
prog_fd: fd_t,
+
/// object to attach to
target_fd: fd_t,
attach_type: u32,
+
/// extra flags
flags: u32,
};
@@ -932,10 +1445,13 @@ pub const LinkCreateAttr = extern struct {
/// struct used by Cmd.link_update command
pub const LinkUpdateAttr = extern struct {
link_fd: fd_t,
+
/// new program to update link with
new_prog_fd: fd_t,
+
/// extra flags
flags: u32,
+
+ /// expected link's program fd; it is specified only if BPF_F_REPLACE is
/// set in flags
old_prog_fd: fd_t,
@@ -952,6 +1468,7 @@ pub const IterCreateAttr = extern struct {
flags: u32,
};
+/// Mega struct that is passed to the bpf() syscall
pub const Attr = extern union {
map_create: MapCreateAttr,
map_elem: MapElemAttr,
@@ -971,3 +1488,176 @@ pub const Attr = extern union {
enable_stats: EnableStatsAttr,
iter_create: IterCreateAttr,
};
+
+pub const Log = struct {
+ level: u32,
+ buf: []u8,
+};
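+
+// A minimal sketch of requesting the verifier log when loading a program with
+// the prog_load() helper below (insns stands for an array of Insn as in the
+// prog_load test further down; the log level and buffer size are arbitrary):
+//
+//     var log_buf: [4096]u8 = undefined;
+//     var log = Log{ .level = 1, .buf = &log_buf };
+//     const prog = try prog_load(.socket_filter, &insns, &log, "MIT", 0);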
+
+pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries: u32) !fd_t {
+ var attr = Attr{
+ .map_create = std.mem.zeroes(MapCreateAttr),
+ };
+
+ attr.map_create.map_type = @enumToInt(map_type);
+ attr.map_create.key_size = key_size;
+ attr.map_create.value_size = value_size;
+ attr.map_create.max_entries = max_entries;
+
+ const rc = bpf(.map_create, &attr, @sizeOf(MapCreateAttr));
+ return switch (errno(rc)) {
+ 0 => @intCast(fd_t, rc),
+ EINVAL => error.MapTypeOrAttrInvalid,
+ ENOMEM => error.SystemResources,
+ EPERM => error.AccessDenied,
+ else => |err| unexpectedErrno(err),
+ };
+}
+
+test "map_create" {
+ const map = try map_create(.hash, 4, 4, 32);
+ defer std.os.close(map);
+}
+
+pub fn map_lookup_elem(fd: fd_t, key: []const u8, value: []u8) !void {
+ var attr = Attr{
+ .map_elem = std.mem.zeroes(MapElemAttr),
+ };
+
+ attr.map_elem.map_fd = fd;
+ attr.map_elem.key = @ptrToInt(key.ptr);
+ attr.map_elem.result.value = @ptrToInt(value.ptr);
+
+ const rc = bpf(.map_lookup_elem, &attr, @sizeOf(MapElemAttr));
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF => return error.BadFd,
+ EFAULT => unreachable,
+ EINVAL => return error.FieldInAttrNeedsZeroing,
+ ENOENT => return error.NotFound,
+ EPERM => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn map_update_elem(fd: fd_t, key: []const u8, value: []const u8, flags: u64) !void {
+ var attr = Attr{
+ .map_elem = std.mem.zeroes(MapElemAttr),
+ };
+
+ attr.map_elem.map_fd = fd;
+ attr.map_elem.key = @ptrToInt(key.ptr);
+ attr.map_elem.result = .{ .value = @ptrToInt(value.ptr) };
+ attr.map_elem.flags = flags;
+
+ const rc = bpf(.map_update_elem, &attr, @sizeOf(MapElemAttr));
+ switch (errno(rc)) {
+ 0 => return,
+ E2BIG => return error.ReachedMaxEntries,
+ EBADF => return error.BadFd,
+ EFAULT => unreachable,
+ EINVAL => return error.FieldInAttrNeedsZeroing,
+ ENOMEM => return error.SystemResources,
+ EPERM => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+pub fn map_delete_elem(fd: fd_t, key: []const u8) !void {
+ var attr = Attr{
+ .map_elem = std.mem.zeroes(MapElemAttr),
+ };
+
+ attr.map_elem.map_fd = fd;
+ attr.map_elem.key = @ptrToInt(key.ptr);
+
+ const rc = bpf(.map_delete_elem, &attr, @sizeOf(MapElemAttr));
+ switch (errno(rc)) {
+ 0 => return,
+ EBADF => return error.BadFd,
+ EFAULT => unreachable,
+ EINVAL => return error.FieldInAttrNeedsZeroing,
+ ENOENT => return error.NotFound,
+ EPERM => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+test "map lookup, update, and delete" {
+ const key_size = 4;
+ const value_size = 4;
+ const map = try map_create(.hash, key_size, value_size, 1);
+ defer std.os.close(map);
+
+ const key = std.mem.zeroes([key_size]u8);
+ var value = std.mem.zeroes([value_size]u8);
+
+ // fails looking up value that doesn't exist
+ expectError(error.NotFound, map_lookup_elem(map, &key, &value));
+
+ // succeed at updating and looking up element
+ try map_update_elem(map, &key, &value, 0);
+ try map_lookup_elem(map, &key, &value);
+
+ // fails inserting more than max entries
+ const second_key = [key_size]u8{ 0, 0, 0, 1 };
+ expectError(error.ReachedMaxEntries, map_update_elem(map, &second_key, &value, 0));
+
+ // succeed at deleting an existing elem
+ try map_delete_elem(map, &key);
+ expectError(error.NotFound, map_lookup_elem(map, &key, &value));
+
+ // fail at deleting a non-existing elem
+ expectError(error.NotFound, map_delete_elem(map, &key));
+}
+
+pub fn prog_load(
+ prog_type: ProgType,
+ insns: []const Insn,
+ log: ?*Log,
+ license: []const u8,
+ kern_version: u32,
+) !fd_t {
+ var attr = Attr{
+ .prog_load = std.mem.zeroes(ProgLoadAttr),
+ };
+
+ attr.prog_load.prog_type = @enumToInt(prog_type);
+ attr.prog_load.insns = @ptrToInt(insns.ptr);
+ attr.prog_load.insn_cnt = @intCast(u32, insns.len);
+ attr.prog_load.license = @ptrToInt(license.ptr);
+ attr.prog_load.kern_version = kern_version;
+
+ if (log) |l| {
+ attr.prog_load.log_buf = @ptrToInt(l.buf.ptr);
+ attr.prog_load.log_size = @intCast(u32, l.buf.len);
+ attr.prog_load.log_level = l.level;
+ }
+
+ const rc = bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr));
+ return switch (errno(rc)) {
+ 0 => @intCast(fd_t, rc),
+ EACCES => error.UnsafeProgram,
+ EFAULT => unreachable,
+ EINVAL => error.InvalidProgram,
+ EPERM => error.AccessDenied,
+ else => |err| unexpectedErrno(err),
+ };
+}
+
+test "prog_load" {
+ // this should fail because it does not set r0 before exiting
+ const bad_prog = [_]Insn{
+ Insn.exit(),
+ };
+
+ const good_prog = [_]Insn{
+ Insn.mov(.r0, 0),
+ Insn.exit(),
+ };
+
+ const prog = try prog_load(.socket_filter, &good_prog, null, "MIT", 0);
+ defer std.os.close(prog);
+
+ expectError(error.UnsafeProgram, prog_load(.socket_filter, &bad_prog, null, "MIT", 0));
+}
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 576125e2a3..0a453d8b2e 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -555,3 +555,39 @@ test "signalfd" {
return error.SkipZigTest;
_ = std.os.signalfd;
}
+
+test "sync" {
+ if (builtin.os.tag != .linux)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const test_out_file = "os_tmp_test";
+ const file = try tmp.dir.createFile(test_out_file, .{});
+ defer {
+ file.close();
+ tmp.dir.deleteFile(test_out_file) catch {};
+ }
+
+ os.sync();
+ try os.syncfs(file.handle);
+}
+
+test "fsync" {
+ if (builtin.os.tag != .linux and builtin.os.tag != .windows)
+ return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const test_out_file = "os_tmp_test";
+ const file = try tmp.dir.createFile(test_out_file, .{});
+ defer {
+ file.close();
+ tmp.dir.deleteFile(test_out_file) catch {};
+ }
+
+ try os.fsync(file.handle);
+ try os.fdatasync(file.handle);
+}
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index fce9eea908..05d160485d 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -287,3 +287,5 @@ pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSA
pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
+
+pub extern "kernel32" fn FlushFileBuffers(hFile: HANDLE) callconv(.Stdcall) BOOL;
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index 11fcf6c70d..2afe42bb3b 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -12,7 +12,7 @@ pub const SOCKET_ERROR = -1;
pub const WSADESCRIPTION_LEN = 256;
pub const WSASYS_STATUS_LEN = 128;
-pub const WSADATA = if (usize.bit_count == u64.bit_count)
+pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64))
extern struct {
wVersion: WORD,
wHighVersion: WORD,
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index e8c61f859d..91e22307d8 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -636,7 +636,7 @@ const MsfStream = struct {
blocks: []u32 = undefined,
block_size: u32 = undefined,
- pub const Error = @TypeOf(read).ReturnType.ErrorSet;
+ pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set;
fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
const stream = MsfStream{
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 69befa2fc8..9cb571714c 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -578,8 +578,8 @@ fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []cons
}
pub const UserInfo = struct {
- uid: u32,
- gid: u32,
+ uid: os.uid_t,
+ gid: os.gid_t,
};
/// POSIX function which gets a uid from username.
@@ -607,8 +607,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
var buf: [std.mem.page_size]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
- var uid: u32 = 0;
- var gid: u32 = 0;
+ var uid: os.uid_t = 0;
+ var gid: os.gid_t = 0;
while (true) {
const amt_read = try reader.read(buf[0..]);
diff --git a/lib/std/progress.zig b/lib/std/progress.zig
index 654d8cc228..82f2801fa1 100644
--- a/lib/std/progress.zig
+++ b/lib/std/progress.zig
@@ -197,7 +197,7 @@ pub const Progress = struct {
var maybe_node: ?*Node = &self.root;
while (maybe_node) |node| {
if (need_ellipse) {
- self.bufWrite(&end, "...", .{});
+ self.bufWrite(&end, "... ", .{});
}
need_ellipse = false;
if (node.name.len != 0 or node.estimated_total_items != null) {
@@ -218,7 +218,7 @@ pub const Progress = struct {
maybe_node = node.recently_updated_child;
}
if (need_ellipse) {
- self.bufWrite(&end, "...", .{});
+ self.bufWrite(&end, "... ", .{});
}
}
@@ -253,7 +253,7 @@ pub const Progress = struct {
const bytes_needed_for_esc_codes_at_end = if (std.builtin.os.tag == .windows) 0 else 11;
const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end;
if (end.* > max_end) {
- const suffix = "...";
+ const suffix = "... ";
self.columns_written = self.columns_written - (end.* - max_end) + suffix.len;
std.mem.copy(u8, self.output_buffer[max_end..], suffix);
end.* = max_end + suffix.len;
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index 7988efffc9..7e05592869 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -51,8 +51,9 @@ pub const Random = struct {
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
- const UnsignedT = std.meta.Int(false, T.bit_count);
- const ByteAlignedT = std.meta.Int(false, @divTrunc(T.bit_count + 7, 8) * 8);
+ const bits = @typeInfo(T).Int.bits;
+ const UnsignedT = std.meta.Int(false, bits);
+ const ByteAlignedT = std.meta.Int(false, @divTrunc(bits + 7, 8) * 8);
var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@@ -68,10 +69,11 @@ pub const Random = struct {
/// Constant-time implementation of `uintLessThan`.
/// The results of this function may be biased.
pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T {
- comptime assert(T.is_signed == false);
- comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
- if (T.bit_count <= 32) {
+ if (bits <= 32) {
return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
} else {
return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
@@ -87,13 +89,15 @@ pub const Random = struct {
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `uintLessThanBiased`.
pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T {
- comptime assert(T.is_signed == false);
- comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
// Small is typically u32
- const Small = std.meta.Int(false, @divTrunc(T.bit_count + 31, 32) * 32);
+ const small_bits = @divTrunc(bits + 31, 32) * 32;
+ const Small = std.meta.Int(false, small_bits);
// Large is typically u64
- const Large = std.meta.Int(false, Small.bit_count * 2);
+ const Large = std.meta.Int(false, small_bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
@@ -105,7 +109,7 @@ pub const Random = struct {
// TODO: workaround for https://github.com/ziglang/zig/issues/1770
// should be:
// var t: Small = -%less_than;
- var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, Small.bit_count), @as(Small, less_than)));
+ var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, small_bits), @as(Small, less_than)));
if (t >= less_than) {
t -= less_than;
@@ -119,13 +123,13 @@ pub const Random = struct {
l = @truncate(Small, m);
}
}
- return @intCast(T, m >> Small.bit_count);
+ return @intCast(T, m >> small_bits);
}
/// Constant-time implementation of `uintAtMost`.
/// The results of this function may be biased.
pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T {
- assert(T.is_signed == false);
+ assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@@ -137,7 +141,7 @@ pub const Random = struct {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T {
- assert(T.is_signed == false);
+ assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@@ -149,9 +153,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
@@ -167,9 +172,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
@@ -184,9 +190,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
@@ -202,9 +209,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
- if (T.is_signed) {
+ const info = @typeInfo(T).Int;
+ if (info.is_signed) {
// Two's complement makes this math pretty easy.
- const UnsignedT = std.meta.Int(false, T.bit_count);
+ const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
@@ -280,14 +288,15 @@ pub const Random = struct {
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
- comptime assert(T.is_signed == false);
- const T2 = std.meta.Int(false, T.bit_count * 2);
+ comptime assert(@typeInfo(T).Int.is_signed == false);
+ const bits = @typeInfo(T).Int.bits;
+ const T2 = std.meta.Int(false, bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
// "Integer Multiplication (Biased)"
var m: T2 = @as(T2, random_int) * @as(T2, less_than);
- return @intCast(T, m >> T.bit_count);
+ return @intCast(T, m >> bits);
}
const SequentialPrng = struct {
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 46d3b0b615..3ab74a11a2 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -133,7 +133,7 @@ pub fn main() !void {
}
fn runBuild(builder: *Builder) anyerror!void {
- switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),
else => @compileError("expected return type of build to be 'void' or '!void'"),
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index d5903ece02..ce8d1c29cc 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -516,11 +516,12 @@ export fn roundf(a: f32) f32 {
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);
- const uint = std.meta.Int(false, T.bit_count);
+ const bits = @typeInfo(T).Float.bits;
+ const uint = std.meta.Int(false, bits);
const log2uint = math.Log2Int(uint);
const digits = if (T == f32) 23 else 52;
const exp_bits = if (T == f32) 9 else 12;
- const bits_minus_1 = T.bit_count - 1;
+ const bits_minus_1 = bits - 1;
const mask = if (T == f32) 0xff else 0x7ff;
var ux = @bitCast(uint, x);
var uy = @bitCast(uint, y);
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/std/special/compiler_rt/addXf3.zig
index 6dd0faaebb..da1238010e 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/std/special/compiler_rt/addXf3.zig
@@ -59,23 +59,25 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
- const shift = @clz(std.meta.Int(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
+ const shift = @clz(std.meta.Int(false, bits), significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
- const typeWidth = T.bit_count;
+ const typeWidth = bits;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -187,7 +189,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
- const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, T.bit_count), implicitBit << 3));
+ const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, bits), implicitBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}
diff --git a/lib/std/special/compiler_rt/aulldiv.zig b/lib/std/special/compiler_rt/aulldiv.zig
index cf9b26c5a6..321ff288bb 100644
--- a/lib/std/special/compiler_rt/aulldiv.zig
+++ b/lib/std/special/compiler_rt/aulldiv.zig
@@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i64.bit_count - 1);
- const s_b = b >> (i64.bit_count - 1);
+ const s_a = a >> (64 - 1);
+ const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/aullrem.zig b/lib/std/special/compiler_rt/aullrem.zig
index 7c981cc088..a14eb99be3 100644
--- a/lib/std/special/compiler_rt/aullrem.zig
+++ b/lib/std/special/compiler_rt/aullrem.zig
@@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i64.bit_count - 1);
- const s_b = b >> (i64.bit_count - 1);
+ const s_a = a >> (64 - 1);
+ const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/compareXf2.zig b/lib/std/special/compiler_rt/compareXf2.zig
index f50dc67474..05af1e533c 100644
--- a/lib/std/special/compiler_rt/compareXf2.zig
+++ b/lib/std/special/compiler_rt/compareXf2.zig
@@ -27,8 +27,9 @@ const GE = extern enum(i32) {
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
- const srep_t = std.meta.Int(true, T.bit_count);
- const rep_t = std.meta.Int(false, T.bit_count);
+ const bits = @typeInfo(T).Float.bits;
+ const srep_t = std.meta.Int(true, bits);
+ const rep_t = std.meta.Int(false, bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -73,7 +74,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
- const rep_t = std.meta.Int(false, T.bit_count);
+ const rep_t = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
diff --git a/lib/std/special/compiler_rt/divdf3.zig b/lib/std/special/compiler_rt/divdf3.zig
index ad72f96057..11ede3af66 100644
--- a/lib/std/special/compiler_rt/divdf3.zig
+++ b/lib/std/special/compiler_rt/divdf3.zig
@@ -12,10 +12,9 @@ const builtin = @import("builtin");
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f64.bit_count);
- const SignedZ = std.meta.Int(true, f64.bit_count);
+ const Z = std.meta.Int(false, 64);
+ const SignedZ = std.meta.Int(true, 64);
- const typeWidth = f64.bit_count;
const significandBits = std.math.floatMantissaBits(f64);
const exponentBits = std.math.floatExponentBits(f64);
@@ -317,9 +316,9 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
-pub fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+pub fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
diff --git a/lib/std/special/compiler_rt/divsf3.zig b/lib/std/special/compiler_rt/divsf3.zig
index 80af806eb1..13f4d8e68d 100644
--- a/lib/std/special/compiler_rt/divsf3.zig
+++ b/lib/std/special/compiler_rt/divsf3.zig
@@ -12,9 +12,8 @@ const builtin = @import("builtin");
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f32.bit_count);
+ const Z = std.meta.Int(false, 32);
- const typeWidth = f32.bit_count;
const significandBits = std.math.floatMantissaBits(f32);
const exponentBits = std.math.floatExponentBits(f32);
@@ -190,9 +189,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
diff --git a/lib/std/special/compiler_rt/divtf3.zig b/lib/std/special/compiler_rt/divtf3.zig
index f6f7c1bf7d..0582400ce3 100644
--- a/lib/std/special/compiler_rt/divtf3.zig
+++ b/lib/std/special/compiler_rt/divtf3.zig
@@ -11,10 +11,9 @@ const wideMultiply = @import("divdf3.zig").wideMultiply;
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, f128.bit_count);
- const SignedZ = std.meta.Int(true, f128.bit_count);
+ const Z = std.meta.Int(false, 128);
+ const SignedZ = std.meta.Int(true, 128);
- const typeWidth = f128.bit_count;
const significandBits = std.math.floatMantissaBits(f128);
const exponentBits = std.math.floatExponentBits(f128);
diff --git a/lib/std/special/compiler_rt/divti3.zig b/lib/std/special/compiler_rt/divti3.zig
index 4b7d459991..a065111510 100644
--- a/lib/std/special/compiler_rt/divti3.zig
+++ b/lib/std/special/compiler_rt/divti3.zig
@@ -9,8 +9,8 @@ const builtin = @import("builtin");
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i128.bit_count - 1);
- const s_b = b >> (i128.bit_count - 1);
+ const s_a = a >> (128 - 1);
+ const s_b = b >> (128 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
diff --git a/lib/std/special/compiler_rt/fixint.zig b/lib/std/special/compiler_rt/fixint.zig
index 0bf0c8be1e..1512641be4 100644
--- a/lib/std/special/compiler_rt/fixint.zig
+++ b/lib/std/special/compiler_rt/fixint.zig
@@ -28,7 +28,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
else => unreachable,
};
- const typeWidth = rep_t.bit_count;
+ const typeWidth = @typeInfo(rep_t).Int.bits;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@@ -50,12 +50,13 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
if (exponent < 0) return 0;
// The unsigned result needs to be large enough to handle an fixint_t or rep_t
- const fixuint_t = std.meta.Int(false, fixint_t.bit_count);
- const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
+ const fixint_bits = @typeInfo(fixint_t).Int.bits;
+ const fixuint_t = std.meta.Int(false, fixint_bits);
+ const UintResultType = if (fixint_bits > typeWidth) fixuint_t else rep_t;
var uint_result: UintResultType = undefined;
// If the value is too large for the integer type, saturate.
- if (@intCast(usize, exponent) >= fixint_t.bit_count) {
+ if (@intCast(usize, exponent) >= fixint_bits) {
return if (negative) @as(fixint_t, minInt(fixint_t)) else @as(fixint_t, maxInt(fixint_t));
}
diff --git a/lib/std/special/compiler_rt/fixuint.zig b/lib/std/special/compiler_rt/fixuint.zig
index 01eb03baa5..3f2d661244 100644
--- a/lib/std/special/compiler_rt/fixuint.zig
+++ b/lib/std/special/compiler_rt/fixuint.zig
@@ -15,14 +15,14 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
f128 => u128,
else => unreachable,
};
- const srep_t = @import("std").meta.Int(true, rep_t.bit_count);
+ const typeWidth = @typeInfo(rep_t).Int.bits;
+ const srep_t = @import("std").meta.Int(true, typeWidth);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,
f128 => 112,
else => unreachable,
};
- const typeWidth = rep_t.bit_count;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@@ -44,7 +44,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
if (sign == -1 or exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
- if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~@as(fixuint_t, 0);
+ if (@intCast(c_uint, exponent) >= @typeInfo(fixuint_t).Int.bits) return ~@as(fixuint_t, 0);
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.
diff --git a/lib/std/special/compiler_rt/floatXisf.zig b/lib/std/special/compiler_rt/floatXisf.zig
index 650b948396..134a1eba61 100644
--- a/lib/std/special/compiler_rt/floatXisf.zig
+++ b/lib/std/special/compiler_rt/floatXisf.zig
@@ -12,15 +12,16 @@ const FLT_MANT_DIG = 24;
fn __floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Int.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (arg == 0) {
return @as(f32, 0.0);
}
var ai = arg;
- const N: u32 = T.bit_count;
+ const N: u32 = bits;
const si = ai >> @intCast(S, (N - 1));
ai = ((ai ^ si) -% si);
var a = @bitCast(Z, ai);
@@ -66,7 +67,7 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
// a is now rounded to FLT_MANT_DIG bits
}
- const s = @bitCast(Z, arg) >> (T.bit_count - 32);
+ const s = @bitCast(Z, arg) >> (@typeInfo(T).Int.bits - 32);
const r = (@intCast(u32, s) & 0x80000000) | // sign
(@intCast(u32, (e + 127)) << 23) | // exponent
(@truncate(u32, a) & 0x007fffff); // mantissa-high
diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig
index 75db3d7040..b6ce36b6f7 100644
--- a/lib/std/special/compiler_rt/floatsiXf.zig
+++ b/lib/std/special/compiler_rt/floatsiXf.zig
@@ -10,8 +10,9 @@ const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
- const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+ const bits = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, bits);
+ const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (a == 0) {
return @as(T, 0.0);
@@ -22,7 +23,7 @@ fn floatsiXf(comptime T: type, a: i32) T {
const exponentBias = ((1 << exponentBits - 1) - 1);
const implicitBit = @as(Z, 1) << significandBits;
- const signBit = @as(Z, 1 << Z.bit_count - 1);
+ const signBit = @as(Z, 1 << bits - 1);
const sign = a >> 31;
// Take absolute value of a via abs(x) = (x^(x >> 31)) - (x >> 31).
diff --git a/lib/std/special/compiler_rt/floatundisf.zig b/lib/std/special/compiler_rt/floatundisf.zig
index b580ec91fd..67cd53b21c 100644
--- a/lib/std/special/compiler_rt/floatundisf.zig
+++ b/lib/std/special/compiler_rt/floatundisf.zig
@@ -15,7 +15,7 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
if (arg == 0) return 0;
var a = arg;
- const N: usize = @TypeOf(a).bit_count;
+ const N: usize = @typeInfo(@TypeOf(a)).Int.bits;
// Number of significant digits
const sd = N - @clz(u64, a);
// 8 exponent
diff --git a/lib/std/special/compiler_rt/floatunditf.zig b/lib/std/special/compiler_rt/floatunditf.zig
index 90191c6388..014a479c5f 100644
--- a/lib/std/special/compiler_rt/floatunditf.zig
+++ b/lib/std/special/compiler_rt/floatunditf.zig
@@ -19,7 +19,7 @@ pub fn __floatunditf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
- const exp: u128 = (u64.bit_count - 1) - @clz(u64, a);
+ const exp: u128 = (64 - 1) - @clz(u64, a);
const shift: u7 = mantissa_bits - @intCast(u7, exp);
var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit;
diff --git a/lib/std/special/compiler_rt/floatunsitf.zig b/lib/std/special/compiler_rt/floatunsitf.zig
index ceb55f12c8..f59446abac 100644
--- a/lib/std/special/compiler_rt/floatunsitf.zig
+++ b/lib/std/special/compiler_rt/floatunsitf.zig
@@ -19,7 +19,7 @@ pub fn __floatunsitf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
- const exp = (u64.bit_count - 1) - @clz(u64, a);
+ const exp = (64 - 1) - @clz(u64, a);
const shift = mantissa_bits - @intCast(u7, exp);
// TODO(#1148): @bitCast alignment error
diff --git a/lib/std/special/compiler_rt/int.zig b/lib/std/special/compiler_rt/int.zig
index 141c4e52c1..1fb2c263e1 100644
--- a/lib/std/special/compiler_rt/int.zig
+++ b/lib/std/special/compiler_rt/int.zig
@@ -219,7 +219,7 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) void {
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
@setRuntimeSafety(builtin.is_test);
- const n_uword_bits: c_uint = u32.bit_count;
+ const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
if (n == 0) return 0;
diff --git a/lib/std/special/compiler_rt/modti3.zig b/lib/std/special/compiler_rt/modti3.zig
index 1f859c2329..9c3de44395 100644
--- a/lib/std/special/compiler_rt/modti3.zig
+++ b/lib/std/special/compiler_rt/modti3.zig
@@ -14,8 +14,8 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const s_a = a >> (i128.bit_count - 1); // s = a < 0 ? -1 : 0
- const s_b = b >> (i128.bit_count - 1); // s = b < 0 ? -1 : 0
+ const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
+ const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
const an = (a ^ s_a) -% s_a; // negate if s == -1
const bn = (b ^ s_b) -% s_b; // negate if s == -1
diff --git a/lib/std/special/compiler_rt/mulXf3.zig b/lib/std/special/compiler_rt/mulXf3.zig
index b6984ebbb6..40b5b4f658 100644
--- a/lib/std/special/compiler_rt/mulXf3.zig
+++ b/lib/std/special/compiler_rt/mulXf3.zig
@@ -33,9 +33,9 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const typeWidth = @typeInfo(T).Float.bits;
+ const Z = std.meta.Int(false, typeWidth);
- const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@@ -269,9 +269,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
@@ -282,7 +282,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@setRuntimeSafety(builtin.is_test);
- const typeWidth = Z.bit_count;
+ const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));
diff --git a/lib/std/special/compiler_rt/mulodi4.zig b/lib/std/special/compiler_rt/mulodi4.zig
index b05931e937..fab345fa47 100644
--- a/lib/std/special/compiler_rt/mulodi4.zig
+++ b/lib/std/special/compiler_rt/mulodi4.zig
@@ -11,7 +11,7 @@ const minInt = std.math.minInt;
pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
@setRuntimeSafety(builtin.is_test);
- const min = @bitCast(i64, @as(u64, 1 << (i64.bit_count - 1)));
+ const min = @bitCast(i64, @as(u64, 1 << (64 - 1)));
const max = ~min;
overflow.* = 0;
diff --git a/lib/std/special/compiler_rt/muloti4.zig b/lib/std/special/compiler_rt/muloti4.zig
index 4beafa3e15..b1ad82da29 100644
--- a/lib/std/special/compiler_rt/muloti4.zig
+++ b/lib/std/special/compiler_rt/muloti4.zig
@@ -9,7 +9,7 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
- const min = @bitCast(i128, @as(u128, 1 << (i128.bit_count - 1)));
+ const min = @bitCast(i128, @as(u128, 1 << (128 - 1)));
const max = ~min;
overflow.* = 0;
@@ -27,9 +27,9 @@ pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
return r;
}
- const sa = a >> (i128.bit_count - 1);
+ const sa = a >> (128 - 1);
const abs_a = (a ^ sa) -% sa;
- const sb = b >> (i128.bit_count - 1);
+ const sb = b >> (128 - 1);
const abs_b = (b ^ sb) -% sb;
if (abs_a < 2 or abs_b < 2) {
diff --git a/lib/std/special/compiler_rt/negXf2.zig b/lib/std/special/compiler_rt/negXf2.zig
index 11f9e401e9..ae01e10776 100644
--- a/lib/std/special/compiler_rt/negXf2.zig
+++ b/lib/std/special/compiler_rt/negXf2.zig
@@ -24,9 +24,8 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
}
fn negXf2(comptime T: type, a: T) T {
- const Z = std.meta.Int(false, T.bit_count);
+ const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
- const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
diff --git a/lib/std/special/compiler_rt/shift.zig b/lib/std/special/compiler_rt/shift.zig
index 1609cb115c..acb14c969a 100644
--- a/lib/std/special/compiler_rt/shift.zig
+++ b/lib/std/special/compiler_rt/shift.zig
@@ -9,8 +9,9 @@ const Log2Int = std.math.Log2Int;
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
- pub const HalfTU = std.meta.Int(false, @divExact(T.bit_count, 2));
- pub const HalfTS = std.meta.Int(true, @divExact(T.bit_count, 2));
+ pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
+ pub const HalfTU = std.meta.Int(false, bits);
+ pub const HalfTS = std.meta.Int(true, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
@@ -30,15 +31,15 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
+ if (b >= dwords.bits) {
output.s.low = 0;
- output.s.high = input.s.low << @intCast(S, b - dwords.HalfT.bit_count);
+ output.s.high = input.s.low << @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.low = input.s.low << @intCast(S, b);
output.s.high = input.s.high << @intCast(S, b);
- output.s.high |= input.s.low >> @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.high |= input.s.low >> @intCast(S, dwords.bits - b);
}
return output.all;
@@ -53,14 +54,14 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
- output.s.high = input.s.high >> (dwords.HalfT.bit_count - 1);
- output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
+ if (b >= dwords.bits) {
+ output.s.high = input.s.high >> (dwords.bits - 1);
+ output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
- output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.low = input.s.high << @intCast(S, dwords.bits - b);
// Avoid sign-extension here
output.s.low |= @bitCast(
dwords.HalfT,
@@ -80,14 +81,14 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
- if (b >= dwords.HalfT.bit_count) {
+ if (b >= dwords.bits) {
output.s.high = 0;
- output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
+ output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
- output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
+ output.s.low = input.s.high << @intCast(S, dwords.bits - b);
output.s.low |= input.s.low >> @intCast(S, b);
}
diff --git a/lib/std/special/compiler_rt/truncXfYf2.zig b/lib/std/special/compiler_rt/truncXfYf2.zig
index e096e7e4f0..b5823607ea 100644
--- a/lib/std/special/compiler_rt/truncXfYf2.zig
+++ b/lib/std/special/compiler_rt/truncXfYf2.zig
@@ -50,7 +50,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const srcBits = src_t.bit_count;
+ const srcBits = @typeInfo(src_t).Float.bits;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;
@@ -65,7 +65,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;
- const dstBits = dst_t.bit_count;
+ const dstBits = @typeInfo(dst_t).Float.bits;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;
diff --git a/lib/std/special/compiler_rt/udivmod.zig b/lib/std/special/compiler_rt/udivmod.zig
index 2836f34c85..f8c7e1298b 100644
--- a/lib/std/special/compiler_rt/udivmod.zig
+++ b/lib/std/special/compiler_rt/udivmod.zig
@@ -15,8 +15,10 @@ const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
- const SingleInt = @import("std").meta.Int(false, @divExact(DoubleInt.bit_count, 2));
- const SignedDoubleInt = @import("std").meta.Int(true, DoubleInt.bit_count);
+ const double_int_bits = @typeInfo(DoubleInt).Int.bits;
+ const single_int_bits = @divExact(double_int_bits, 2);
+ const SingleInt = @import("std").meta.Int(false, single_int_bits);
+ const SignedDoubleInt = @import("std").meta.Int(true, double_int_bits);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
@@ -82,21 +84,21 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
- // 0 <= sr <= SingleInt.bit_count - 2 or sr large
- if (sr > SingleInt.bit_count - 2) {
+ // 0 <= sr <= single_int_bits - 2 or sr large
+ if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
- // 1 <= sr <= SingleInt.bit_count - 1
- // q.all = a << (DoubleInt.bit_count - sr);
+ // 1 <= sr <= single_int_bits - 1
+ // q.all = a << (double_int_bits - sr);
q[low] = 0;
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
// r.all = a >> sr;
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {
@@ -113,74 +115,74 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
}
sr = @ctz(SingleInt, d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
- q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
// 0 K
- sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
- // 2 <= sr <= DoubleInt.bit_count - 1
- // q.all = a << (DoubleInt.bit_count - sr);
+ sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
+ // 2 <= sr <= double_int_bits - 1
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
- if (sr == SingleInt.bit_count) {
+ if (sr == single_int_bits) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
- } else if (sr < SingleInt.bit_count) {
- // 2 <= sr <= SingleInt.bit_count - 1
+ } else if (sr < single_int_bits) {
+ // 2 <= sr <= single_int_bits - 1
q[low] = 0;
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
- // SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
- q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
- q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
+ // single_int_bits + 1 <= sr <= double_int_bits - 1
+ q[low] = n[low] << @intCast(Log2SingleInt, double_int_bits - sr);
+ q[high] = (n[high] << @intCast(Log2SingleInt, double_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - single_int_bits));
r[high] = 0;
- r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
+ r[low] = n[high] >> @intCast(Log2SingleInt, sr - single_int_bits);
}
} else {
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
- // 0 <= sr <= SingleInt.bit_count - 1 or sr large
- if (sr > SingleInt.bit_count - 1) {
+ // 0 <= sr <= single_int_bits - 1 or sr large
+ if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
- // 1 <= sr <= SingleInt.bit_count
- // q.all = a << (DoubleInt.bit_count - sr);
+ // 1 <= sr <= single_int_bits
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
q[low] = 0;
- if (sr == SingleInt.bit_count) {
+ if (sr == single_int_bits) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
- r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
- q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
}
}
}
// Not a special case
// q and r are initialized with:
- // q.all = a << (DoubleInt.bit_count - sr);
+ // q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
- // 1 <= sr <= DoubleInt.bit_count - 1
+ // 1 <= sr <= double_int_bits - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
- r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
- r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
- q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
+ r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1));
+ r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1));
+ q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
@@ -189,7 +191,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
- const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
+ const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (double_int_bits - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 87b011ede8..b9452b79cc 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -40,7 +40,7 @@ pub fn main() anyerror!void {
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
- std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
+ std.debug.print("{}/{} {}... ", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 8e443a7c77..c65cd08981 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv
uefi.handle = handle;
uefi.system_table = system_table;
- switch (@TypeOf(root.main).ReturnType) {
+ switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) {
noreturn => {
root.main();
},
@@ -239,7 +239,7 @@ fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 {
// This is not marked inline because it is called with @asyncCall when
// there is an event loop.
pub fn callMain() u8 {
- switch (@typeInfo(@TypeOf(root.main).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.NoReturn => {
root.main();
},
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 330f3c253b..4236b29298 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -50,6 +50,7 @@ pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig");
pub const cache_hash = @import("cache_hash.zig");
pub const coff = @import("coff.zig");
+pub const compress = @import("compress.zig");
pub const crypto = @import("crypto.zig");
pub const cstr = @import("cstr.zig");
pub const debug = @import("debug.zig");
diff --git a/lib/std/target.zig b/lib/std/target.zig
index deb7c85984..37425a9a29 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -101,7 +101,7 @@ pub const Target = struct {
/// Latest Windows version that the Zig Standard Library is aware of
pub const latest = WindowsVersion.win10_20h1;
-
+
pub const Range = struct {
min: WindowsVersion,
max: WindowsVersion,
@@ -468,6 +468,7 @@ pub const Target = struct {
/// TODO Get rid of this one.
unknown,
coff,
+ pe,
elf,
macho,
wasm,
@@ -771,6 +772,63 @@ pub const Target = struct {
};
}
+ pub fn toCoffMachine(arch: Arch) std.coff.MachineType {
+ return switch (arch) {
+ .avr => .Unknown,
+ .msp430 => .Unknown,
+ .arc => .Unknown,
+ .arm => .ARM,
+ .armeb => .Unknown,
+ .hexagon => .Unknown,
+ .le32 => .Unknown,
+ .mips => .Unknown,
+ .mipsel => .Unknown,
+ .powerpc => .POWERPC,
+ .r600 => .Unknown,
+ .riscv32 => .RISCV32,
+ .sparc => .Unknown,
+ .sparcel => .Unknown,
+ .tce => .Unknown,
+ .tcele => .Unknown,
+ .thumb => .Thumb,
+ .thumbeb => .Thumb,
+ .i386 => .I386,
+ .xcore => .Unknown,
+ .nvptx => .Unknown,
+ .amdil => .Unknown,
+ .hsail => .Unknown,
+ .spir => .Unknown,
+ .kalimba => .Unknown,
+ .shave => .Unknown,
+ .lanai => .Unknown,
+ .wasm32 => .Unknown,
+ .renderscript32 => .Unknown,
+ .aarch64_32 => .ARM64,
+ .aarch64 => .ARM64,
+ .aarch64_be => .Unknown,
+ .mips64 => .Unknown,
+ .mips64el => .Unknown,
+ .powerpc64 => .Unknown,
+ .powerpc64le => .Unknown,
+ .riscv64 => .RISCV64,
+ .x86_64 => .X64,
+ .nvptx64 => .Unknown,
+ .le64 => .Unknown,
+ .amdil64 => .Unknown,
+ .hsail64 => .Unknown,
+ .spir64 => .Unknown,
+ .wasm64 => .Unknown,
+ .renderscript64 => .Unknown,
+ .amdgcn => .Unknown,
+ .bpfel => .Unknown,
+ .bpfeb => .Unknown,
+ .sparcv9 => .Unknown,
+ .s390x => .Unknown,
+ .ve => .Unknown,
+ .spu_2 => .Unknown,
+ };
+ }
+
pub fn endian(arch: Arch) builtin.Endian {
return switch (arch) {
.avr,
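The new `toCoffMachine` above maps each architecture to a `std.coff.MachineType`, falling back to `.Unknown` for architectures without a COFF equivalent. A hedged usage sketch (assuming `Arch` here is `std.Target.Cpu.Arch`, as used elsewhere in target.zig):

const std = @import("std");

test "toCoffMachine sketch" {
    const Arch = std.Target.Cpu.Arch;
    // Mappings taken from the hunk above.
    std.debug.assert(Arch.toCoffMachine(.x86_64) == std.coff.MachineType.X64);
    std.debug.assert(Arch.toCoffMachine(.wasm32) == std.coff.MachineType.Unknown);
}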
diff --git a/lib/std/thread.zig b/lib/std/thread.zig
index d73907690e..330c425dd6 100644
--- a/lib/std/thread.zig
+++ b/lib/std/thread.zig
@@ -166,7 +166,7 @@ pub const Thread = struct {
fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@@ -227,7 +227,7 @@ pub const Thread = struct {
fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 {
const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@@ -259,7 +259,7 @@ pub const Thread = struct {
fn posixThreadMain(ctx: ?*c_void) callconv(.C) ?*c_void {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), ctx)).*;
- switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
+ switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index e86a12884f..1dedce4067 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -22,7 +22,7 @@ pub const SrcHash = [16]u8;
/// If it is long, blake3 hash is computed.
pub fn hashSrc(src: []const u8) SrcHash {
var out: SrcHash = undefined;
- if (src.len <= SrcHash.len) {
+ if (src.len <= @typeInfo(SrcHash).Array.len) {
std.mem.copy(u8, &out, src);
std.mem.set(u8, out[src.len..], 0);
} else {
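The zig.zig fix reads the array length from type info rather than from a `len` declaration on the array type. Minimal sketch:

const std = @import("std");

comptime {
    const SrcHash = [16]u8;
    // Array length comes from the type info, not from `SrcHash.len`.
    std.debug.assert(@typeInfo(SrcHash).Array.len == 16);
}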
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 8259af32a6..36ceb400dc 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -615,6 +615,17 @@ test "zig fmt: infix operator and then multiline string literal" {
);
}
+test "zig fmt: infix operator and then multiline string literal" {
+ try testCanonical(
+ \\const x = "" ++
+ \\ \\ hi0
+ \\ \\ hi1
+ \\ \\ hi2
+ \\;
+ \\
+ );
+}
+
test "zig fmt: C pointers" {
try testCanonical(
\\const Ptr = [*c]i32;
@@ -885,6 +896,28 @@ test "zig fmt: 2nd arg multiline string" {
);
}
+test "zig fmt: 2nd arg multiline string many args" {
+ try testCanonical(
+ \\comptime {
+ \\ cases.addAsm("hello world linux x86_64",
+ \\ \\.text
+ \\ , "Hello, world!\n", "Hello, world!\n");
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: final arg multiline string" {
+ try testCanonical(
+ \\comptime {
+ \\ cases.addAsm("hello world linux x86_64", "Hello, world!\n",
+ \\ \\.text
+ \\ );
+ \\}
+ \\
+ );
+}
+
test "zig fmt: if condition wraps" {
try testTransform(
\\comptime {
@@ -915,6 +948,11 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
+ \\ var a = if (cond and
+ \\ cond) |*f|
+ \\ x: {
+ \\ break :x &a.b;
+ \\ } else |err| err;
\\}
,
\\comptime {
@@ -951,6 +989,35 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
+ \\ var a = if (cond and
+ \\ cond) |*f|
+ \\ x: {
+ \\ break :x &a.b;
+ \\ } else |err| err;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: if condition has line break but must not wrap" {
+ try testCanonical(
+ \\comptime {
+ \\ if (self.user_input_options.put(
+ \\ name,
+ \\ UserInputOption{
+ \\ .name = name,
+ \\ .used = false,
+ \\ },
+ \\ ) catch unreachable) |*prev_value| {
+ \\ foo();
+ \\ bar();
+ \\ }
+ \\ if (put(
+ \\ a,
+ \\ b,
+ \\ )) {
+ \\ foo();
+ \\ }
\\}
\\
);
@@ -977,6 +1044,18 @@ test "zig fmt: if condition has line break but must not wrap" {
);
}
+test "zig fmt: function call with multiline argument" {
+ try testCanonical(
+ \\comptime {
+ \\ self.user_input_options.put(name, UserInputOption{
+ \\ .name = name,
+ \\ .used = false,
+ \\ });
+ \\}
+ \\
+ );
+}
+
test "zig fmt: same-line doc comment on variable declaration" {
try testTransform(
\\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space
@@ -1228,7 +1307,7 @@ test "zig fmt: array literal with hint" {
\\const a = []u8{
\\ 1, 2,
\\ 3, //
- \\ 4,
+ \\ 4,
\\ 5, 6,
\\ 7,
\\};
@@ -1293,7 +1372,7 @@ test "zig fmt: multiline string parameter in fn call with trailing comma" {
\\ \\ZIG_C_HEADER_FILES {}
\\ \\ZIG_DIA_GUIDS_LIB {}
\\ \\
- \\ ,
+ \\ ,
\\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
\\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
\\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
@@ -2885,20 +2964,20 @@ test "zig fmt: multiline string in array" {
try testCanonical(
\\const Foo = [][]const u8{
\\ \\aaa
- \\,
+ \\ ,
\\ \\bbb
\\};
\\
\\fn bar() void {
\\ const Foo = [][]const u8{
\\ \\aaa
- \\ ,
+ \\ ,
\\ \\bbb
\\ };
\\ const Bar = [][]const u8{ // comment here
\\ \\aaa
\\ \\
- \\ , // and another comment can go here
+ \\ , // and another comment can go here
\\ \\bbb
\\ };
\\}
@@ -3214,6 +3293,34 @@ test "zig fmt: C var args" {
);
}
+test "zig fmt: Only indent multiline string literals in function calls" {
+ try testCanonical(
+ \\test "zig fmt:" {
+ \\ try testTransform(
+ \\ \\const X = struct {
+ \\ \\ foo: i32, bar: i8 };
+ \\ ,
+ \\ \\const X = struct {
+ \\ \\ foo: i32, bar: i8
+ \\ \\};
+ \\ \\
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: Don't add extra newline after if" {
+ try testCanonical(
+ \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+ \\ if (cwd().symLink(existing_path, new_path, .{})) {
+ \\ return;
+ \\ }
+ \\}
+ \\
+ );
+}
+
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
@@ -3256,7 +3363,8 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b
var buffer = std.ArrayList(u8).init(allocator);
errdefer buffer.deinit();
- anything_changed.* = try std.zig.render(allocator, buffer.outStream(), tree);
+ const outStream = buffer.outStream();
+ anything_changed.* = try std.zig.render(allocator, outStream, tree);
return buffer.toOwnedSlice();
}
fn testTransform(source: []const u8, expected_source: []const u8) !void {
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 5a1f904be6..237ca07d2b 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -6,10 +6,12 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
+const meta = std.meta;
const ast = std.zig.ast;
const Token = std.zig.Token;
const indent_delta = 4;
+const asm_indent_delta = 2;
pub const Error = error{
/// Ran out of memory allocating call stack frames to complete rendering.
@@ -21,70 +23,32 @@ pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (@Typ
// cannot render an invalid tree
std.debug.assert(tree.errors.len == 0);
- // make a passthrough stream that checks whether something changed
- const MyStream = struct {
- const MyStream = @This();
- const StreamError = @TypeOf(stream).Error;
-
- child_stream: @TypeOf(stream),
- anything_changed: bool,
- source_index: usize,
- source: []const u8,
-
- fn write(self: *MyStream, bytes: []const u8) StreamError!usize {
- if (!self.anything_changed) {
- const end = self.source_index + bytes.len;
- if (end > self.source.len) {
- self.anything_changed = true;
- } else {
- const src_slice = self.source[self.source_index..end];
- self.source_index += bytes.len;
- if (!mem.eql(u8, bytes, src_slice)) {
- self.anything_changed = true;
- }
- }
- }
-
- return self.child_stream.write(bytes);
- }
- };
- var my_stream = MyStream{
- .child_stream = stream,
- .anything_changed = false,
- .source_index = 0,
- .source = tree.source,
- };
- const my_stream_stream: std.io.Writer(*MyStream, MyStream.StreamError, MyStream.write) = .{
- .context = &my_stream,
- };
+ var change_detection_stream = std.io.changeDetectionStream(tree.source, stream);
+ var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, change_detection_stream.writer());
- try renderRoot(allocator, my_stream_stream, tree);
+ try renderRoot(allocator, &auto_indenting_stream, tree);
- if (my_stream.source_index != my_stream.source.len) {
- my_stream.anything_changed = true;
- }
-
- return my_stream.anything_changed;
+ return change_detection_stream.changeDetected();
}
fn renderRoot(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
+
// render all the line comments at the beginning of the file
for (tree.token_ids) |token_id, i| {
if (token_id != .LineComment) break;
const token_loc = tree.token_locs[i];
- try stream.print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")});
+ try ais.writer().print("{}\n", .{mem.trimRight(u8, tree.tokenSliceLoc(token_loc), " ")});
const next_token = tree.token_locs[i + 1];
const loc = tree.tokenLocationLoc(token_loc.end, next_token);
if (loc.line >= 2) {
- try stream.writeByte('\n');
+ try ais.insertNewline();
}
}
- var start_col: usize = 0;
var decl_i: ast.NodeIndex = 0;
const root_decls = tree.root_node.decls();
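The render() hunk above swaps the hand-rolled pass-through stream for `std.io.changeDetectionStream` wrapped in `std.io.autoIndentingStream`, so the render code no longer threads `indent` and `start_col` by hand. A hedged sketch of that composition; the API names are taken from the diff, while the wrapper function, literal output, and test are illustrative only:

const std = @import("std");

// Compose the two writers: indentation handling on top, change detection
// underneath, then report whether the output differed from `source`.
fn renderedDiffers(source: []const u8, out_stream: anytype) !bool {
    var change_detection_stream = std.io.changeDetectionStream(source, out_stream);
    var ais = std.io.autoIndentingStream(4, change_detection_stream.writer());

    ais.pushIndent();
    defer ais.popIndent();
    try ais.writer().writeAll("const x = 1;");
    try ais.insertNewline();

    return change_detection_stream.changeDetected();
}

test "renderedDiffers sketch" {
    _ = try renderedDiffers("const x = 1;\n", std.io.null_out_stream);
}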
@@ -145,7 +109,7 @@ fn renderRoot(
// If there's no next reformatted `decl`, just copy the
// remaining input tokens and bail out.
const start = tree.token_locs[copy_start_token_index].start;
- try copyFixingWhitespace(stream, tree.source[start..]);
+ try copyFixingWhitespace(ais, tree.source[start..]);
return;
}
decl = root_decls[decl_i];
@@ -186,26 +150,25 @@ fn renderRoot(
const start = tree.token_locs[copy_start_token_index].start;
const end = tree.token_locs[copy_end_token_index].start;
- try copyFixingWhitespace(stream, tree.source[start..end]);
+ try copyFixingWhitespace(ais, tree.source[start..end]);
}
- try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl);
+ try renderTopLevelDecl(allocator, ais, tree, decl);
decl_i += 1;
if (decl_i >= root_decls.len) return;
- try renderExtraNewline(tree, stream, &start_col, root_decls[decl_i]);
+ try renderExtraNewline(tree, ais, root_decls[decl_i]);
}
}
-fn renderExtraNewline(tree: *ast.Tree, stream: anytype, start_col: *usize, node: *ast.Node) @TypeOf(stream).Error!void {
- return renderExtraNewlineToken(tree, stream, start_col, node.firstToken());
+fn renderExtraNewline(tree: *ast.Tree, ais: anytype, node: *ast.Node) @TypeOf(ais.*).Error!void {
+ return renderExtraNewlineToken(tree, ais, node.firstToken());
}
fn renderExtraNewlineToken(
tree: *ast.Tree,
- stream: anytype,
- start_col: *usize,
+ ais: anytype,
first_token: ast.TokenIndex,
-) @TypeOf(stream).Error!void {
+) @TypeOf(ais.*).Error!void {
var prev_token = first_token;
if (prev_token == 0) return;
var newline_threshold: usize = 2;
@@ -218,28 +181,27 @@ fn renderExtraNewlineToken(
const prev_token_end = tree.token_locs[prev_token - 1].end;
const loc = tree.tokenLocation(prev_token_end, first_token);
if (loc.line >= newline_threshold) {
- try stream.writeByte('\n');
- start_col.* = 0;
+ try ais.insertNewline();
}
}
-fn renderTopLevelDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Error || Error)!void {
- try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Newline);
+fn renderTopLevelDecl(allocator: *mem.Allocator, ais: anytype, tree: *ast.Tree, decl: *ast.Node) (@TypeOf(ais.*).Error || Error)!void {
+ try renderContainerDecl(allocator, ais, tree, decl, .Newline);
}
-fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Error || Error)!void {
+fn renderContainerDecl(allocator: *mem.Allocator, ais: anytype, tree: *ast.Tree, decl: *ast.Node, space: Space) (@TypeOf(ais.*).Error || Error)!void {
switch (decl.tag) {
.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
- try renderDocComments(tree, stream, fn_proto, fn_proto.getDocComments(), indent, start_col);
+ try renderDocComments(tree, ais, fn_proto, fn_proto.getDocComments());
if (fn_proto.getBodyNode()) |body_node| {
- try renderExpression(allocator, stream, tree, indent, start_col, decl, .Space);
- try renderExpression(allocator, stream, tree, indent, start_col, body_node, space);
+ try renderExpression(allocator, ais, tree, decl, .Space);
+ try renderExpression(allocator, ais, tree, body_node, space);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, decl, .None);
- try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, space);
+ try renderExpression(allocator, ais, tree, decl, .None);
+ try renderToken(tree, ais, tree.nextToken(decl.lastToken()), space);
}
},
@@ -247,35 +209,35 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr
const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl);
if (use_decl.visib_token) |visib_token| {
- try renderToken(tree, stream, visib_token, indent, start_col, .Space); // pub
+ try renderToken(tree, ais, visib_token, .Space); // pub
}
- try renderToken(tree, stream, use_decl.use_token, indent, start_col, .Space); // usingnamespace
- try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, .None);
- try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, space); // ;
+ try renderToken(tree, ais, use_decl.use_token, .Space); // usingnamespace
+ try renderExpression(allocator, ais, tree, use_decl.expr, .None);
+ try renderToken(tree, ais, use_decl.semicolon_token, space); // ;
},
.VarDecl => {
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);
- try renderDocComments(tree, stream, var_decl, var_decl.getDocComments(), indent, start_col);
- try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
+ try renderDocComments(tree, ais, var_decl, var_decl.getDocComments());
+ try renderVarDecl(allocator, ais, tree, var_decl);
},
.TestDecl => {
const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);
- try renderDocComments(tree, stream, test_decl, test_decl.doc_comments, indent, start_col);
- try renderToken(tree, stream, test_decl.test_token, indent, start_col, .Space);
- try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, .Space);
- try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, space);
+ try renderDocComments(tree, ais, test_decl, test_decl.doc_comments);
+ try renderToken(tree, ais, test_decl.test_token, .Space);
+ try renderExpression(allocator, ais, tree, test_decl.name, .Space);
+ try renderExpression(allocator, ais, tree, test_decl.body_node, space);
},
.ContainerField => {
const field = @fieldParentPtr(ast.Node.ContainerField, "base", decl);
- try renderDocComments(tree, stream, field, field.doc_comments, indent, start_col);
+ try renderDocComments(tree, ais, field, field.doc_comments);
if (field.comptime_token) |t| {
- try renderToken(tree, stream, t, indent, start_col, .Space); // comptime
+ try renderToken(tree, ais, t, .Space); // comptime
}
const src_has_trailing_comma = blk: {
@@ -288,68 +250,67 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr
const last_token_space: Space = if (src_has_trailing_comma) .None else space;
if (field.type_expr == null and field.value_expr == null) {
- try renderToken(tree, stream, field.name_token, indent, start_col, last_token_space); // name
+ try renderToken(tree, ais, field.name_token, last_token_space); // name
} else if (field.type_expr != null and field.value_expr == null) {
- try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name
- try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // :
+ try renderToken(tree, ais, field.name_token, .None); // name
+ try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // :
if (field.align_expr) |align_value_expr| {
- try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
+ try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type
const lparen_token = tree.prevToken(align_value_expr.firstToken());
const align_kw = tree.prevToken(lparen_token);
const rparen_token = tree.nextToken(align_value_expr.lastToken());
- try renderToken(tree, stream, align_kw, indent, start_col, .None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, .None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment
- try renderToken(tree, stream, rparen_token, indent, start_col, last_token_space); // )
+ try renderToken(tree, ais, align_kw, .None); // align
+ try renderToken(tree, ais, lparen_token, .None); // (
+ try renderExpression(allocator, ais, tree, align_value_expr, .None); // alignment
+ try renderToken(tree, ais, rparen_token, last_token_space); // )
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, last_token_space); // type
+ try renderExpression(allocator, ais, tree, field.type_expr.?, last_token_space); // type
}
} else if (field.type_expr == null and field.value_expr != null) {
- try renderToken(tree, stream, field.name_token, indent, start_col, .Space); // name
- try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // =
- try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value
+ try renderToken(tree, ais, field.name_token, .Space); // name
+ try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // =
+ try renderExpression(allocator, ais, tree, field.value_expr.?, last_token_space); // value
} else {
- try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name
- try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // :
+ try renderToken(tree, ais, field.name_token, .None); // name
+ try renderToken(tree, ais, tree.nextToken(field.name_token), .Space); // :
if (field.align_expr) |align_value_expr| {
- try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
+ try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type
const lparen_token = tree.prevToken(align_value_expr.firstToken());
const align_kw = tree.prevToken(lparen_token);
const rparen_token = tree.nextToken(align_value_expr.lastToken());
- try renderToken(tree, stream, align_kw, indent, start_col, .None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, .None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment
- try renderToken(tree, stream, rparen_token, indent, start_col, .Space); // )
+ try renderToken(tree, ais, align_kw, .None); // align
+ try renderToken(tree, ais, lparen_token, .None); // (
+ try renderExpression(allocator, ais, tree, align_value_expr, .None); // alignment
+ try renderToken(tree, ais, rparen_token, .Space); // )
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
+ try renderExpression(allocator, ais, tree, field.type_expr.?, .Space); // type
}
- try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), indent, start_col, .Space); // =
- try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value
+ try renderToken(tree, ais, tree.prevToken(field.value_expr.?.firstToken()), .Space); // =
+ try renderExpression(allocator, ais, tree, field.value_expr.?, last_token_space); // value
}
if (src_has_trailing_comma) {
const comma = tree.nextToken(field.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, space);
+ try renderToken(tree, ais, comma, space);
}
},
.Comptime => {
assert(!decl.requireSemiColon());
- try renderExpression(allocator, stream, tree, indent, start_col, decl, space);
+ try renderExpression(allocator, ais, tree, decl, space);
},
.DocComment => {
const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
const kind = tree.token_ids[comment.first_line];
- try renderToken(tree, stream, comment.first_line, indent, start_col, .Newline);
+ try renderToken(tree, ais, comment.first_line, .Newline);
var tok_i = comment.first_line + 1;
while (true) : (tok_i += 1) {
const tok_id = tree.token_ids[tok_i];
if (tok_id == kind) {
- try stream.writeByteNTimes(' ', indent);
- try renderToken(tree, stream, tok_i, indent, start_col, .Newline);
+ try renderToken(tree, ais, tok_i, .Newline);
} else if (tok_id == .LineComment) {
continue;
} else {
@@ -363,13 +324,11 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tr
fn renderExpression(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
base: *ast.Node,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
switch (base.tag) {
.Identifier,
.IntegerLiteral,
@@ -383,18 +342,18 @@ fn renderExpression(
.UndefinedLiteral,
=> {
const casted_node = base.cast(ast.Node.OneToken).?;
- return renderToken(tree, stream, casted_node.token, indent, start_col, space);
+ return renderToken(tree, ais, casted_node.token, space);
},
.AnyType => {
const any_type = base.castTag(.AnyType).?;
if (mem.eql(u8, tree.tokenSlice(any_type.token), "var")) {
// TODO remove in next release cycle
- try stream.writeAll("anytype");
- if (space == .Comma) try stream.writeAll(",\n");
+ try ais.writer().writeAll("anytype");
+ if (space == .Comma) try ais.writer().writeAll(",\n");
return;
}
- return renderToken(tree, stream, any_type.token, indent, start_col, space);
+ return renderToken(tree, ais, any_type.token, space);
},
.Block, .LabeledBlock => {
@@ -424,65 +383,65 @@ fn renderExpression(
};
if (block.label) |label| {
- try renderToken(tree, stream, label, indent, start_col, Space.None);
- try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space);
+ try renderToken(tree, ais, label, Space.None);
+ try renderToken(tree, ais, tree.nextToken(label), Space.Space);
}
if (block.statements.len == 0) {
- try renderToken(tree, stream, block.lbrace, indent + indent_delta, start_col, Space.None);
- return renderToken(tree, stream, block.rbrace, indent, start_col, space);
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, block.lbrace, Space.None);
} else {
- const block_indent = indent + indent_delta;
- try renderToken(tree, stream, block.lbrace, block_indent, start_col, Space.Newline);
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+
+ try renderToken(tree, ais, block.lbrace, Space.Newline);
for (block.statements) |statement, i| {
- try stream.writeByteNTimes(' ', block_indent);
- try renderStatement(allocator, stream, tree, block_indent, start_col, statement);
+ try renderStatement(allocator, ais, tree, statement);
if (i + 1 < block.statements.len) {
- try renderExtraNewline(tree, stream, start_col, block.statements[i + 1]);
+ try renderExtraNewline(tree, ais, block.statements[i + 1]);
}
}
-
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, block.rbrace, indent, start_col, space);
}
+ return renderToken(tree, ais, block.rbrace, space);
},
.Defer => {
const defer_node = @fieldParentPtr(ast.Node.Defer, "base", base);
- try renderToken(tree, stream, defer_node.defer_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, defer_node.defer_token, Space.Space);
if (defer_node.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, defer_node.expr, space);
+ return renderExpression(allocator, ais, tree, defer_node.expr, space);
},
.Comptime => {
const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", base);
- try renderToken(tree, stream, comptime_node.comptime_token, indent, start_col, Space.Space);
- return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space);
+ try renderToken(tree, ais, comptime_node.comptime_token, Space.Space);
+ return renderExpression(allocator, ais, tree, comptime_node.expr, space);
},
.Nosuspend => {
const nosuspend_node = @fieldParentPtr(ast.Node.Nosuspend, "base", base);
if (mem.eql(u8, tree.tokenSlice(nosuspend_node.nosuspend_token), "noasync")) {
// TODO: remove this
- try stream.writeAll("nosuspend ");
+ try ais.writer().writeAll("nosuspend ");
} else {
- try renderToken(tree, stream, nosuspend_node.nosuspend_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, nosuspend_node.nosuspend_token, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, nosuspend_node.expr, space);
+ return renderExpression(allocator, ais, tree, nosuspend_node.expr, space);
},
.Suspend => {
const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base);
if (suspend_node.body) |body| {
- try renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, Space.Space);
- return renderExpression(allocator, stream, tree, indent, start_col, body, space);
+ try renderToken(tree, ais, suspend_node.suspend_token, Space.Space);
+ return renderExpression(allocator, ais, tree, body, space);
} else {
- return renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, space);
+ return renderToken(tree, ais, suspend_node.suspend_token, space);
}
},
@@ -490,26 +449,21 @@ fn renderExpression(
const infix_op_node = @fieldParentPtr(ast.Node.Catch, "base", base);
const op_space = Space.Space;
- try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
+ try renderExpression(allocator, ais, tree, infix_op_node.lhs, op_space);
const after_op_space = blk: {
- const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token));
- break :blk if (loc.line == 0) op_space else Space.Newline;
+ const same_line = tree.tokensOnSameLine(infix_op_node.op_token, tree.nextToken(infix_op_node.op_token));
+ break :blk if (same_line) op_space else Space.Newline;
};
- try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
- if (after_op_space == Space.Newline and
- tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine)
- {
- try stream.writeByteNTimes(' ', indent + indent_delta);
- start_col.* = indent + indent_delta;
- }
+ try renderToken(tree, ais, infix_op_node.op_token, after_op_space);
if (infix_op_node.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space);
+ ais.pushIndentOneShot();
+ return renderExpression(allocator, ais, tree, infix_op_node.rhs, space);
},
.Add,
@@ -561,22 +515,16 @@ fn renderExpression(
.Period, .ErrorUnion, .Range => Space.None,
else => Space.Space,
};
- try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
+ try renderExpression(allocator, ais, tree, infix_op_node.lhs, op_space);
const after_op_space = blk: {
const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token));
break :blk if (loc.line == 0) op_space else Space.Newline;
};
- try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
- if (after_op_space == Space.Newline and
- tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine)
- {
- try stream.writeByteNTimes(' ', indent + indent_delta);
- start_col.* = indent + indent_delta;
- }
-
- return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space);
+ try renderToken(tree, ais, infix_op_node.op_token, after_op_space);
+ ais.pushIndentOneShot();
+ return renderExpression(allocator, ais, tree, infix_op_node.rhs, space);
},
.BitNot,
@@ -587,8 +535,8 @@ fn renderExpression(
.AddressOf,
=> {
const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base);
- try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.None);
- return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space);
+ try renderToken(tree, ais, casted_node.op_token, Space.None);
+ return renderExpression(allocator, ais, tree, casted_node.rhs, space);
},
.Try,
@@ -596,18 +544,16 @@ fn renderExpression(
.Await,
=> {
const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base);
- try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.Space);
- return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space);
+ try renderToken(tree, ais, casted_node.op_token, Space.Space);
+ return renderExpression(allocator, ais, tree, casted_node.rhs, space);
},
.ArrayType => {
const array_type = @fieldParentPtr(ast.Node.ArrayType, "base", base);
return renderArrayType(
allocator,
- stream,
+ ais,
tree,
- indent,
- start_col,
array_type.op_token,
array_type.rhs,
array_type.len_expr,
@@ -619,10 +565,8 @@ fn renderExpression(
const array_type = @fieldParentPtr(ast.Node.ArrayTypeSentinel, "base", base);
return renderArrayType(
allocator,
- stream,
+ ais,
tree,
- indent,
- start_col,
array_type.op_token,
array_type.rhs,
array_type.len_expr,
@@ -635,111 +579,111 @@ fn renderExpression(
const ptr_type = @fieldParentPtr(ast.Node.PtrType, "base", base);
const op_tok_id = tree.token_ids[ptr_type.op_token];
switch (op_tok_id) {
- .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
+ .Asterisk, .AsteriskAsterisk => try ais.writer().writeByte('*'),
.LBracket => if (tree.token_ids[ptr_type.op_token + 2] == .Identifier)
- try stream.writeAll("[*c")
+ try ais.writer().writeAll("[*c")
else
- try stream.writeAll("[*"),
+ try ais.writer().writeAll("[*"),
else => unreachable,
}
if (ptr_type.ptr_info.sentinel) |sentinel| {
const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
+ try renderToken(tree, ais, colon_token, Space.None); // :
const sentinel_space = switch (op_tok_id) {
.LBracket => Space.None,
else => Space.Space,
};
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, sentinel_space);
+ try renderExpression(allocator, ais, tree, sentinel, sentinel_space);
}
switch (op_tok_id) {
.Asterisk, .AsteriskAsterisk => {},
- .LBracket => try stream.writeByte(']'),
+ .LBracket => try ais.writer().writeByte(']'),
else => unreachable,
}
if (ptr_type.ptr_info.allowzero_token) |allowzero_token| {
- try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
+ try renderToken(tree, ais, allowzero_token, Space.Space); // allowzero
}
if (ptr_type.ptr_info.align_info) |align_info| {
const lparen_token = tree.prevToken(align_info.node.firstToken());
const align_token = tree.prevToken(lparen_token);
- try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, align_token, Space.None); // align
+ try renderToken(tree, ais, lparen_token, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+ try renderExpression(allocator, ais, tree, align_info.node, Space.None);
if (align_info.bit_range) |bit_range| {
const colon1 = tree.prevToken(bit_range.start.firstToken());
const colon2 = tree.prevToken(bit_range.end.firstToken());
- try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
- try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+ try renderToken(tree, ais, colon1, Space.None); // :
+ try renderExpression(allocator, ais, tree, bit_range.start, Space.None);
+ try renderToken(tree, ais, colon2, Space.None); // :
+ try renderExpression(allocator, ais, tree, bit_range.end, Space.None);
const rparen_token = tree.nextToken(bit_range.end.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen_token, Space.Space); // )
} else {
const rparen_token = tree.nextToken(align_info.node.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen_token, Space.Space); // )
}
}
if (ptr_type.ptr_info.const_token) |const_token| {
- try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const
+ try renderToken(tree, ais, const_token, Space.Space); // const
}
if (ptr_type.ptr_info.volatile_token) |volatile_token| {
- try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
+ try renderToken(tree, ais, volatile_token, Space.Space); // volatile
}
- return renderExpression(allocator, stream, tree, indent, start_col, ptr_type.rhs, space);
+ return renderExpression(allocator, ais, tree, ptr_type.rhs, space);
},
.SliceType => {
const slice_type = @fieldParentPtr(ast.Node.SliceType, "base", base);
- try renderToken(tree, stream, slice_type.op_token, indent, start_col, Space.None); // [
+ try renderToken(tree, ais, slice_type.op_token, Space.None); // [
if (slice_type.ptr_info.sentinel) |sentinel| {
const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
- try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), indent, start_col, Space.None); // ]
+ try renderToken(tree, ais, colon_token, Space.None); // :
+ try renderExpression(allocator, ais, tree, sentinel, Space.None);
+ try renderToken(tree, ais, tree.nextToken(sentinel.lastToken()), Space.None); // ]
} else {
- try renderToken(tree, stream, tree.nextToken(slice_type.op_token), indent, start_col, Space.None); // ]
+ try renderToken(tree, ais, tree.nextToken(slice_type.op_token), Space.None); // ]
}
if (slice_type.ptr_info.allowzero_token) |allowzero_token| {
- try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
+ try renderToken(tree, ais, allowzero_token, Space.Space); // allowzero
}
if (slice_type.ptr_info.align_info) |align_info| {
const lparen_token = tree.prevToken(align_info.node.firstToken());
const align_token = tree.prevToken(lparen_token);
- try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, align_token, Space.None); // align
+ try renderToken(tree, ais, lparen_token, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+ try renderExpression(allocator, ais, tree, align_info.node, Space.None);
if (align_info.bit_range) |bit_range| {
const colon1 = tree.prevToken(bit_range.start.firstToken());
const colon2 = tree.prevToken(bit_range.end.firstToken());
- try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
- try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+ try renderToken(tree, ais, colon1, Space.None); // :
+ try renderExpression(allocator, ais, tree, bit_range.start, Space.None);
+ try renderToken(tree, ais, colon2, Space.None); // :
+ try renderExpression(allocator, ais, tree, bit_range.end, Space.None);
const rparen_token = tree.nextToken(bit_range.end.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen_token, Space.Space); // )
} else {
const rparen_token = tree.nextToken(align_info.node.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen_token, Space.Space); // )
}
}
if (slice_type.ptr_info.const_token) |const_token| {
- try renderToken(tree, stream, const_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, const_token, Space.Space);
}
if (slice_type.ptr_info.volatile_token) |volatile_token| {
- try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, volatile_token, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, slice_type.rhs, space);
+ return renderExpression(allocator, ais, tree, slice_type.rhs, space);
},
.ArrayInitializer, .ArrayInitializerDot => {
@@ -768,27 +712,33 @@ fn renderExpression(
if (exprs.len == 0) {
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
+ }
+
+ {
+ ais.pushIndent();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lbrace, Space.None);
}
- try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
- return renderToken(tree, stream, rtoken, indent, start_col, space);
- }
- if (exprs.len == 1 and tree.token_ids[exprs[0].lastToken() + 1] == .RBrace) {
+ return renderToken(tree, ais, rtoken, space);
+ }
+ if (exprs.len == 1 and tree.token_ids[exprs[0].*.lastToken() + 1] == .RBrace) {
const expr = exprs[0];
+
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
- try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
- try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ try renderToken(tree, ais, lbrace, Space.None);
+ try renderExpression(allocator, ais, tree, expr, Space.None);
+ return renderToken(tree, ais, rtoken, space);
}
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
// scan to find row size
@@ -830,79 +780,70 @@ fn renderExpression(
var expr_widths = widths[0 .. widths.len - row_size];
var column_widths = widths[widths.len - row_size ..];
- // Null stream for counting the printed length of each expression
+ // Null ais for counting the printed length of each expression
var counting_stream = std.io.countingOutStream(std.io.null_out_stream);
+ var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, counting_stream.writer());
for (exprs) |expr, i| {
counting_stream.bytes_written = 0;
- var dummy_col: usize = 0;
- try renderExpression(allocator, counting_stream.outStream(), tree, indent, &dummy_col, expr, Space.None);
+ try renderExpression(allocator, &auto_indenting_stream, tree, expr, Space.None);
const width = @intCast(usize, counting_stream.bytes_written);
const col = i % row_size;
column_widths[col] = std.math.max(column_widths[col], width);
expr_widths[i] = width;
}
- var new_indent = indent + indent_delta;
+ {
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lbrace, Space.Newline);
- if (tree.token_ids[tree.nextToken(lbrace)] != .MultilineStringLiteralLine) {
- try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
- try stream.writeByteNTimes(' ', new_indent);
- } else {
- new_indent -= indent_delta;
- try renderToken(tree, stream, lbrace, new_indent, start_col, Space.None);
- }
+ var col: usize = 1;
+ for (exprs) |expr, i| {
+ if (i + 1 < exprs.len) {
+ const next_expr = exprs[i + 1];
+ try renderExpression(allocator, ais, tree, expr, Space.None);
- var col: usize = 1;
- for (exprs) |expr, i| {
- if (i + 1 < exprs.len) {
- const next_expr = exprs[i + 1];
- try renderExpression(allocator, stream, tree, new_indent, start_col, expr, Space.None);
+ const comma = tree.nextToken(expr.*.lastToken());
- const comma = tree.nextToken(expr.lastToken());
+ if (col != row_size) {
+ try renderToken(tree, ais, comma, Space.Space); // ,
- if (col != row_size) {
- try renderToken(tree, stream, comma, new_indent, start_col, Space.Space); // ,
+ const padding = column_widths[i % row_size] - expr_widths[i];
+ try ais.writer().writeByteNTimes(' ', padding);
- const padding = column_widths[i % row_size] - expr_widths[i];
- try stream.writeByteNTimes(' ', padding);
+ col += 1;
+ continue;
+ }
+ col = 1;
- col += 1;
- continue;
- }
- col = 1;
+ if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) {
+ try renderToken(tree, ais, comma, Space.Newline); // ,
+ } else {
+ try renderToken(tree, ais, comma, Space.None); // ,
+ }
- if (tree.token_ids[tree.nextToken(comma)] != .MultilineStringLiteralLine) {
- try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
+ try renderExtraNewline(tree, ais, next_expr);
} else {
- try renderToken(tree, stream, comma, new_indent, start_col, Space.None); // ,
- }
-
- try renderExtraNewline(tree, stream, start_col, next_expr);
- if (next_expr.tag != .MultilineStringLiteral) {
- try stream.writeByteNTimes(' ', new_indent);
+ try renderExpression(allocator, ais, tree, expr, Space.Comma); // ,
}
- } else {
- try renderExpression(allocator, stream, tree, new_indent, start_col, expr, Space.Comma); // ,
}
}
- if (exprs[exprs.len - 1].tag != .MultilineStringLiteral) {
- try stream.writeByteNTimes(' ', indent);
- }
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ return renderToken(tree, ais, rtoken, space);
} else {
- try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
+ try renderToken(tree, ais, lbrace, Space.Space);
for (exprs) |expr, i| {
if (i + 1 < exprs.len) {
- try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
- const comma = tree.nextToken(expr.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
+ const next_expr = exprs[i + 1];
+ try renderExpression(allocator, ais, tree, expr, Space.None);
+ const comma = tree.nextToken(expr.*.lastToken());
+ try renderToken(tree, ais, comma, Space.Space); // ,
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.Space);
+ try renderExpression(allocator, ais, tree, expr, Space.Space);
}
}
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ return renderToken(tree, ais, rtoken, space);
}
},
@@ -932,11 +873,17 @@ fn renderExpression(
if (field_inits.len == 0) {
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
- try renderToken(tree, stream, lbrace, indent + indent_delta, start_col, Space.None);
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+
+ {
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lbrace, Space.None);
+ }
+
+ return renderToken(tree, ais, rtoken, space);
}
const src_has_trailing_comma = blk: {
@@ -952,9 +899,10 @@ fn renderExpression(
const expr_outputs_one_line = blk: {
// render field expressions until a LF is found
for (field_inits) |field_init| {
- var find_stream = FindByteOutStream.init('\n');
- var dummy_col: usize = 0;
- try renderExpression(allocator, find_stream.outStream(), tree, 0, &dummy_col, field_init, Space.None);
+ var find_stream = std.io.findByteOutStream('\n', std.io.null_out_stream);
+ var auto_indenting_stream = std.io.autoIndentingStream(indent_delta, find_stream.writer());
+
+ try renderExpression(allocator, &auto_indenting_stream, tree, field_init, Space.None);
if (find_stream.byte_found) break :blk false;
}
break :blk true;
@@ -967,7 +915,6 @@ fn renderExpression(
.StructInitializer,
.StructInitializerDot,
=> break :blk,
-
else => {},
}
@@ -977,76 +924,78 @@ fn renderExpression(
}
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
- try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
- try renderExpression(allocator, stream, tree, indent, start_col, &field_init.base, Space.Space);
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ try renderToken(tree, ais, lbrace, Space.Space);
+ try renderExpression(allocator, ais, tree, &field_init.base, Space.Space);
+ return renderToken(tree, ais, rtoken, space);
}
if (!src_has_trailing_comma and src_same_line and expr_outputs_one_line) {
// render all on one line, no trailing comma
switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None),
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
}
- try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
+ try renderToken(tree, ais, lbrace, Space.Space);
for (field_inits) |field_init, i| {
if (i + 1 < field_inits.len) {
- try renderExpression(allocator, stream, tree, indent, start_col, field_init, Space.None);
+ try renderExpression(allocator, ais, tree, field_init, Space.None);
const comma = tree.nextToken(field_init.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, Space.Space);
+ try renderToken(tree, ais, comma, Space.Space);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, field_init, Space.Space);
+ try renderExpression(allocator, ais, tree, field_init, Space.Space);
}
}
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ return renderToken(tree, ais, rtoken, space);
}
- const new_indent = indent + indent_delta;
+ {
+ switch (lhs) {
+ .dot => |dot| try renderToken(tree, ais, dot, Space.None),
+ .node => |node| try renderExpression(allocator, ais, tree, node, Space.None),
+ }
- switch (lhs) {
- .dot => |dot| try renderToken(tree, stream, dot, new_indent, start_col, Space.None),
- .node => |node| try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None),
- }
- try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
- for (field_inits) |field_init, i| {
- try stream.writeByteNTimes(' ', new_indent);
+ try renderToken(tree, ais, lbrace, Space.Newline);
- if (i + 1 < field_inits.len) {
- try renderExpression(allocator, stream, tree, new_indent, start_col, field_init, Space.None);
+ for (field_inits) |field_init, i| {
+ if (i + 1 < field_inits.len) {
+ const next_field_init = field_inits[i + 1];
+ try renderExpression(allocator, ais, tree, field_init, Space.None);
- const comma = tree.nextToken(field_init.lastToken());
- try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline);
+ const comma = tree.nextToken(field_init.lastToken());
+ try renderToken(tree, ais, comma, Space.Newline);
- try renderExtraNewline(tree, stream, start_col, field_inits[i + 1]);
- } else {
- try renderExpression(allocator, stream, tree, new_indent, start_col, field_init, Space.Comma);
+ try renderExtraNewline(tree, ais, next_field_init);
+ } else {
+ try renderExpression(allocator, ais, tree, field_init, Space.Comma);
+ }
}
}
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, rtoken, indent, start_col, space);
+ return renderToken(tree, ais, rtoken, space);
},
.Call => {
const call = @fieldParentPtr(ast.Node.Call, "base", base);
if (call.async_token) |async_token| {
- try renderToken(tree, stream, async_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, async_token, Space.Space);
}
- try renderExpression(allocator, stream, tree, indent, start_col, call.lhs, Space.None);
+ try renderExpression(allocator, ais, tree, call.lhs, Space.None);
const lparen = tree.nextToken(call.lhs.lastToken());
if (call.params_len == 0) {
- try renderToken(tree, stream, lparen, indent, start_col, Space.None);
- return renderToken(tree, stream, call.rtoken, indent, start_col, space);
+ try renderToken(tree, ais, lparen, Space.None);
+ return renderToken(tree, ais, call.rtoken, space);
}
const src_has_trailing_comma = blk: {
@@ -1055,43 +1004,41 @@ fn renderExpression(
};
if (src_has_trailing_comma) {
- const new_indent = indent + indent_delta;
- try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline);
+ try renderToken(tree, ais, lparen, Space.Newline);
const params = call.params();
for (params) |param_node, i| {
- const param_node_new_indent = if (param_node.tag == .MultilineStringLiteral) blk: {
- break :blk indent;
- } else blk: {
- try stream.writeByteNTimes(' ', new_indent);
- break :blk new_indent;
- };
+ ais.pushIndent();
+ defer ais.popIndent();
if (i + 1 < params.len) {
- try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node, Space.None);
+ const next_node = params[i + 1];
+ try renderExpression(allocator, ais, tree, param_node, Space.None);
const comma = tree.nextToken(param_node.lastToken());
- try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
- try renderExtraNewline(tree, stream, start_col, params[i + 1]);
+ try renderToken(tree, ais, comma, Space.Newline); // ,
+ try renderExtraNewline(tree, ais, next_node);
} else {
- try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node, Space.Comma);
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, call.rtoken, indent, start_col, space);
+ try renderExpression(allocator, ais, tree, param_node, Space.Comma);
}
}
+ return renderToken(tree, ais, call.rtoken, space);
}
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, lparen, Space.None); // (
const params = call.params();
for (params) |param_node, i| {
- try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.None);
+ if (param_node.*.tag == .MultilineStringLiteral) ais.pushIndentOneShot();
+
+ try renderExpression(allocator, ais, tree, param_node, Space.None);
if (i + 1 < params.len) {
+ const next_param = params[i + 1];
const comma = tree.nextToken(param_node.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, Space.Space);
+ try renderToken(tree, ais, comma, Space.Space);
}
}
- return renderToken(tree, stream, call.rtoken, indent, start_col, space);
+ return renderToken(tree, ais, call.rtoken, space);
},
.ArrayAccess => {
@@ -1100,26 +1047,25 @@ fn renderExpression(
const lbracket = tree.nextToken(suffix_op.lhs.lastToken());
const rbracket = tree.nextToken(suffix_op.index_expr.lastToken());
- try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
- try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
+ try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None);
+ try renderToken(tree, ais, lbracket, Space.None); // [
const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
- const new_indent = if (ends_with_comment) indent + indent_delta else indent;
- const new_space = if (ends_with_comment) Space.Newline else Space.None;
- try renderExpression(allocator, stream, tree, new_indent, start_col, suffix_op.index_expr, new_space);
- if (starts_with_comment) {
- try stream.writeByte('\n');
- }
- if (ends_with_comment or starts_with_comment) {
- try stream.writeByteNTimes(' ', indent);
+ {
+ const new_space = if (ends_with_comment) Space.Newline else Space.None;
+
+ ais.pushIndent();
+ defer ais.popIndent();
+ try renderExpression(allocator, ais, tree, suffix_op.index_expr, new_space);
}
- return renderToken(tree, stream, rbracket, indent, start_col, space); // ]
+ if (starts_with_comment) try ais.maybeInsertNewline();
+ return renderToken(tree, ais, rbracket, space); // ]
},
+
.Slice => {
const suffix_op = base.castTag(.Slice).?;
-
- try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None);
const lbracket = tree.prevToken(suffix_op.start.firstToken());
const dotdot = tree.nextToken(suffix_op.start.lastToken());
@@ -1129,32 +1075,33 @@ fn renderExpression(
const after_start_space = if (after_start_space_bool) Space.Space else Space.None;
const after_op_space = if (suffix_op.end != null) after_start_space else Space.None;
- try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
- try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.start, after_start_space);
- try renderToken(tree, stream, dotdot, indent, start_col, after_op_space); // ..
+ try renderToken(tree, ais, lbracket, Space.None); // [
+ try renderExpression(allocator, ais, tree, suffix_op.start, after_start_space);
+ try renderToken(tree, ais, dotdot, after_op_space); // ..
if (suffix_op.end) |end| {
const after_end_space = if (suffix_op.sentinel != null) Space.Space else Space.None;
- try renderExpression(allocator, stream, tree, indent, start_col, end, after_end_space);
+ try renderExpression(allocator, ais, tree, end, after_end_space);
}
if (suffix_op.sentinel) |sentinel| {
const colon = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
+ try renderToken(tree, ais, colon, Space.None); // :
+ try renderExpression(allocator, ais, tree, sentinel, Space.None);
}
- return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ]
+ return renderToken(tree, ais, suffix_op.rtoken, space); // ]
},
+
.Deref => {
const suffix_op = base.castTag(.Deref).?;
- try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
- return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // .*
+ try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None);
+ return renderToken(tree, ais, suffix_op.rtoken, space); // .*
},
.UnwrapOptional => {
const suffix_op = base.castTag(.UnwrapOptional).?;
- try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
- try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // .
- return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ?
+ try renderExpression(allocator, ais, tree, suffix_op.lhs, Space.None);
+ try renderToken(tree, ais, tree.prevToken(suffix_op.rtoken), Space.None); // .
+ return renderToken(tree, ais, suffix_op.rtoken, space); // ?
},
.Break => {
@@ -1163,145 +1110,152 @@ fn renderExpression(
const maybe_label = flow_expr.getLabel();
if (maybe_label == null and maybe_rhs == null) {
- return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // break
+ return renderToken(tree, ais, flow_expr.ltoken, space); // break
}
- try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // break
+ try renderToken(tree, ais, flow_expr.ltoken, Space.Space); // break
if (maybe_label) |label| {
const colon = tree.nextToken(flow_expr.ltoken);
- try renderToken(tree, stream, colon, indent, start_col, Space.None); // :
+ try renderToken(tree, ais, colon, Space.None); // :
if (maybe_rhs == null) {
- return renderToken(tree, stream, label, indent, start_col, space); // label
+ return renderToken(tree, ais, label, space); // label
}
- try renderToken(tree, stream, label, indent, start_col, Space.Space); // label
+ try renderToken(tree, ais, label, Space.Space); // label
}
- return renderExpression(allocator, stream, tree, indent, start_col, maybe_rhs.?, space);
+ return renderExpression(allocator, ais, tree, maybe_rhs.?, space);
},
.Continue => {
const flow_expr = base.castTag(.Continue).?;
if (flow_expr.getLabel()) |label| {
- try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // continue
+ try renderToken(tree, ais, flow_expr.ltoken, Space.Space); // continue
const colon = tree.nextToken(flow_expr.ltoken);
- try renderToken(tree, stream, colon, indent, start_col, Space.None); // :
- return renderToken(tree, stream, label, indent, start_col, space); // label
+ try renderToken(tree, ais, colon, Space.None); // :
+ return renderToken(tree, ais, label, space); // label
} else {
- return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // continue
+ return renderToken(tree, ais, flow_expr.ltoken, space); // continue
}
},
.Return => {
const flow_expr = base.castTag(.Return).?;
if (flow_expr.getRHS()) |rhs| {
- try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space);
- return renderExpression(allocator, stream, tree, indent, start_col, rhs, space);
+ try renderToken(tree, ais, flow_expr.ltoken, Space.Space);
+ return renderExpression(allocator, ais, tree, rhs, space);
} else {
- return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space);
+ return renderToken(tree, ais, flow_expr.ltoken, space);
}
},
.Payload => {
const payload = @fieldParentPtr(ast.Node.Payload, "base", base);
- try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
- try renderExpression(allocator, stream, tree, indent, start_col, payload.error_symbol, Space.None);
- return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ try renderToken(tree, ais, payload.lpipe, Space.None);
+ try renderExpression(allocator, ais, tree, payload.error_symbol, Space.None);
+ return renderToken(tree, ais, payload.rpipe, space);
},
.PointerPayload => {
const payload = @fieldParentPtr(ast.Node.PointerPayload, "base", base);
- try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
+ try renderToken(tree, ais, payload.lpipe, Space.None);
if (payload.ptr_token) |ptr_token| {
- try renderToken(tree, stream, ptr_token, indent, start_col, Space.None);
+ try renderToken(tree, ais, ptr_token, Space.None);
}
- try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None);
- return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ try renderExpression(allocator, ais, tree, payload.value_symbol, Space.None);
+ return renderToken(tree, ais, payload.rpipe, space);
},
.PointerIndexPayload => {
const payload = @fieldParentPtr(ast.Node.PointerIndexPayload, "base", base);
- try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
+ try renderToken(tree, ais, payload.lpipe, Space.None);
if (payload.ptr_token) |ptr_token| {
- try renderToken(tree, stream, ptr_token, indent, start_col, Space.None);
+ try renderToken(tree, ais, ptr_token, Space.None);
}
- try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None);
+ try renderExpression(allocator, ais, tree, payload.value_symbol, Space.None);
if (payload.index_symbol) |index_symbol| {
const comma = tree.nextToken(payload.value_symbol.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, Space.Space);
- try renderExpression(allocator, stream, tree, indent, start_col, index_symbol, Space.None);
+ try renderToken(tree, ais, comma, Space.Space);
+ try renderExpression(allocator, ais, tree, index_symbol, Space.None);
}
- return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ return renderToken(tree, ais, payload.rpipe, space);
},
.GroupedExpression => {
const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", base);
- try renderToken(tree, stream, grouped_expr.lparen, indent, start_col, Space.None);
- try renderExpression(allocator, stream, tree, indent, start_col, grouped_expr.expr, Space.None);
- return renderToken(tree, stream, grouped_expr.rparen, indent, start_col, space);
+ try renderToken(tree, ais, grouped_expr.lparen, Space.None);
+ {
+ ais.pushIndentOneShot();
+ try renderExpression(allocator, ais, tree, grouped_expr.expr, Space.None);
+ }
+ return renderToken(tree, ais, grouped_expr.rparen, space);
},
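
A side note on the GroupedExpression hunk above: pushIndentOneShot is pushed without a matching popIndent, which suggests the extra level is consumed by the next emitted line and then expires on its own. The snippet below is only a guess at that behaviour, for illustration; OneShotDemo and takeIndent are invented names, not the real AutoIndentingStream API.

const std = @import("std");

// Guessed model of a "one-shot" indent: it applies to exactly one line and
// then clears itself, which is why the diff never pops it explicitly.
const OneShotDemo = struct {
    indent_count: usize = 0,
    one_shot: bool = false,

    fn pushIndentOneShot(self: *OneShotDemo) void {
        self.one_shot = true;
    }

    // Indentation the next written line would receive.
    fn takeIndent(self: *OneShotDemo) usize {
        const extra: usize = if (self.one_shot) 1 else 0;
        self.one_shot = false; // consumed by the first line that uses it
        return self.indent_count + extra;
    }
};

test "one-shot indent expires after a single line" {
    var demo = OneShotDemo{ .indent_count = 1 };
    demo.pushIndentOneShot();
    std.debug.assert(demo.takeIndent() == 2); // only the next line is deeper
    std.debug.assert(demo.takeIndent() == 1); // back to the pushed levels
}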
.FieldInitializer => {
const field_init = @fieldParentPtr(ast.Node.FieldInitializer, "base", base);
- try renderToken(tree, stream, field_init.period_token, indent, start_col, Space.None); // .
- try renderToken(tree, stream, field_init.name_token, indent, start_col, Space.Space); // name
- try renderToken(tree, stream, tree.nextToken(field_init.name_token), indent, start_col, Space.Space); // =
- return renderExpression(allocator, stream, tree, indent, start_col, field_init.expr, space);
+ try renderToken(tree, ais, field_init.period_token, Space.None); // .
+ try renderToken(tree, ais, field_init.name_token, Space.Space); // name
+ try renderToken(tree, ais, tree.nextToken(field_init.name_token), Space.Space); // =
+ return renderExpression(allocator, ais, tree, field_init.expr, space);
},
.ContainerDecl => {
const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base);
if (container_decl.layout_token) |layout_token| {
- try renderToken(tree, stream, layout_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, layout_token, Space.Space);
}
switch (container_decl.init_arg_expr) {
.None => {
- try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.Space); // union
+ try renderToken(tree, ais, container_decl.kind_token, Space.Space); // union
},
.Enum => |enum_tag_type| {
- try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union
+ try renderToken(tree, ais, container_decl.kind_token, Space.None); // union
const lparen = tree.nextToken(container_decl.kind_token);
const enum_token = tree.nextToken(lparen);
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
- try renderToken(tree, stream, enum_token, indent, start_col, Space.None); // enum
+ try renderToken(tree, ais, lparen, Space.None); // (
+ try renderToken(tree, ais, enum_token, Space.None); // enum
if (enum_tag_type) |expr| {
- try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
+ try renderToken(tree, ais, tree.nextToken(enum_token), Space.None); // (
+ try renderExpression(allocator, ais, tree, expr, Space.None);
const rparen = tree.nextToken(expr.lastToken());
- try renderToken(tree, stream, rparen, indent, start_col, Space.None); // )
- try renderToken(tree, stream, tree.nextToken(rparen), indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen, Space.None); // )
+ try renderToken(tree, ais, tree.nextToken(rparen), Space.Space); // )
} else {
- try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, tree.nextToken(enum_token), Space.Space); // )
}
},
.Type => |type_expr| {
- try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union
+ try renderToken(tree, ais, container_decl.kind_token, Space.None); // union
const lparen = tree.nextToken(container_decl.kind_token);
const rparen = tree.nextToken(type_expr.lastToken());
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.None);
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, type_expr, Space.None);
+ try renderToken(tree, ais, rparen, Space.Space); // )
},
}
if (container_decl.fields_and_decls_len == 0) {
- try renderToken(tree, stream, container_decl.lbrace_token, indent + indent_delta, start_col, Space.None); // {
- return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
+ {
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, container_decl.lbrace_token, Space.None); // {
+ }
+ return renderToken(tree, ais, container_decl.rbrace_token, space); // }
}
const src_has_trailing_comma = blk: {
@@ -1332,43 +1286,39 @@ fn renderExpression(
if (src_has_trailing_comma or !src_has_only_fields) {
// One declaration per line
- const new_indent = indent + indent_delta;
- try renderToken(tree, stream, container_decl.lbrace_token, new_indent, start_col, .Newline); // {
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, container_decl.lbrace_token, .Newline); // {
for (fields_and_decls) |decl, i| {
- try stream.writeByteNTimes(' ', new_indent);
- try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl, .Newline);
+ try renderContainerDecl(allocator, ais, tree, decl, .Newline);
if (i + 1 < fields_and_decls.len) {
- try renderExtraNewline(tree, stream, start_col, fields_and_decls[i + 1]);
+ try renderExtraNewline(tree, ais, fields_and_decls[i + 1]);
}
}
-
- try stream.writeByteNTimes(' ', indent);
} else if (src_has_newline) {
// All the declarations on the same line, but place the items on
// their own line
- try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Newline); // {
+ try renderToken(tree, ais, container_decl.lbrace_token, .Newline); // {
- const new_indent = indent + indent_delta;
- try stream.writeByteNTimes(' ', new_indent);
+ ais.pushIndent();
+ defer ais.popIndent();
for (fields_and_decls) |decl, i| {
const space_after_decl: Space = if (i + 1 >= fields_and_decls.len) .Newline else .Space;
- try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl, space_after_decl);
+ try renderContainerDecl(allocator, ais, tree, decl, space_after_decl);
}
-
- try stream.writeByteNTimes(' ', indent);
} else {
// All the declarations on the same line
- try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Space); // {
+ try renderToken(tree, ais, container_decl.lbrace_token, .Space); // {
for (fields_and_decls) |decl| {
- try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Space);
+ try renderContainerDecl(allocator, ais, tree, decl, .Space);
}
}
- return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
+ return renderToken(tree, ais, container_decl.rbrace_token, space); // }
},
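
The ContainerDecl hunk above is representative of the whole patch: the explicit `indent + indent_delta` arithmetic and `stream.writeByteNTimes(' ', ...)` calls are replaced by indentation scopes on the auto-indenting stream (`pushIndent`/`pushIndentNextLine` paired with `defer ais.popIndent()`). Below is a minimal toy model of that idea, assuming nothing about the real stream beyond the calls visible above; ToyIndenter and writeLine are invented for illustration and are not the real API.

const std = @import("std");

// Toy scoped-indentation writer: callers push and pop levels, and every line
// is prefixed with the current depth, so no indent value is threaded through.
const ToyIndenter = struct {
    out: *std.ArrayList(u8),
    indent_delta: usize = 4,
    indent_count: usize = 0,

    fn pushIndent(self: *ToyIndenter) void {
        self.indent_count += 1;
    }

    fn popIndent(self: *ToyIndenter) void {
        self.indent_count -= 1;
    }

    // Write one complete line, prefixed with the current indentation.
    fn writeLine(self: *ToyIndenter, bytes: []const u8) !void {
        var i: usize = 0;
        while (i < self.indent_count * self.indent_delta) : (i += 1) {
            try self.out.append(' ');
        }
        try self.out.appendSlice(bytes);
        try self.out.append('\n');
    }
};

test "scoped indentation instead of explicit indent arithmetic" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();

    var ais = ToyIndenter{ .out = &buf };
    try ais.writeLine("const S = struct {");
    {
        // The braces own exactly one indentation level; no new_indent variable.
        ais.pushIndent();
        defer ais.popIndent();
        try ais.writeLine("a: u32,");
        try ais.writeLine("b: u32,");
    }
    try ais.writeLine("};");

    const expected = "const S = struct {\n    a: u32,\n    b: u32,\n};\n";
    std.debug.assert(std.mem.eql(u8, expected, buf.items));
}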
.ErrorSetDecl => {
@@ -1377,9 +1327,9 @@ fn renderExpression(
const lbrace = tree.nextToken(err_set_decl.error_token);
if (err_set_decl.decls_len == 0) {
- try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None);
- try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
- return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space);
+ try renderToken(tree, ais, err_set_decl.error_token, Space.None);
+ try renderToken(tree, ais, lbrace, Space.None);
+ return renderToken(tree, ais, err_set_decl.rbrace_token, space);
}
if (err_set_decl.decls_len == 1) blk: {
@@ -1393,13 +1343,13 @@ fn renderExpression(
break :blk;
}
- try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error
- try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);
- return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // }
+ try renderToken(tree, ais, err_set_decl.error_token, Space.None); // error
+ try renderToken(tree, ais, lbrace, Space.None); // {
+ try renderExpression(allocator, ais, tree, node, Space.None);
+ return renderToken(tree, ais, err_set_decl.rbrace_token, space); // }
}
- try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error
+ try renderToken(tree, ais, err_set_decl.error_token, Space.None); // error
const src_has_trailing_comma = blk: {
const maybe_comma = tree.prevToken(err_set_decl.rbrace_token);
@@ -1407,72 +1357,66 @@ fn renderExpression(
};
if (src_has_trailing_comma) {
- try renderToken(tree, stream, lbrace, indent, start_col, Space.Newline); // {
- const new_indent = indent + indent_delta;
-
- const decls = err_set_decl.decls();
- for (decls) |node, i| {
- try stream.writeByteNTimes(' ', new_indent);
-
- if (i + 1 < decls.len) {
- try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.None);
- try renderToken(tree, stream, tree.nextToken(node.lastToken()), new_indent, start_col, Space.Newline); // ,
-
- try renderExtraNewline(tree, stream, start_col, decls[i + 1]);
- } else {
- try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.Comma);
+ {
+ ais.pushIndent();
+ defer ais.popIndent();
+
+ try renderToken(tree, ais, lbrace, Space.Newline); // {
+ const decls = err_set_decl.decls();
+ for (decls) |node, i| {
+ if (i + 1 < decls.len) {
+ try renderExpression(allocator, ais, tree, node, Space.None);
+ try renderToken(tree, ais, tree.nextToken(node.lastToken()), Space.Newline); // ,
+
+ try renderExtraNewline(tree, ais, decls[i + 1]);
+ } else {
+ try renderExpression(allocator, ais, tree, node, Space.Comma);
+ }
}
}
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // }
+ return renderToken(tree, ais, err_set_decl.rbrace_token, space); // }
} else {
- try renderToken(tree, stream, lbrace, indent, start_col, Space.Space); // {
+ try renderToken(tree, ais, lbrace, Space.Space); // {
const decls = err_set_decl.decls();
for (decls) |node, i| {
if (i + 1 < decls.len) {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);
+ try renderExpression(allocator, ais, tree, node, Space.None);
const comma_token = tree.nextToken(node.lastToken());
assert(tree.token_ids[comma_token] == .Comma);
- try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
- try renderExtraNewline(tree, stream, start_col, decls[i + 1]);
+ try renderToken(tree, ais, comma_token, Space.Space); // ,
+ try renderExtraNewline(tree, ais, decls[i + 1]);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Space);
+ try renderExpression(allocator, ais, tree, node, Space.Space);
}
}
- return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // }
+ return renderToken(tree, ais, err_set_decl.rbrace_token, space); // }
}
},
.ErrorTag => {
const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base);
- try renderDocComments(tree, stream, tag, tag.doc_comments, indent, start_col);
- return renderToken(tree, stream, tag.name_token, indent, start_col, space); // name
+ try renderDocComments(tree, ais, tag, tag.doc_comments);
+ return renderToken(tree, ais, tag.name_token, space); // name
},
.MultilineStringLiteral => {
- // TODO: Don't indent in this function, but let the caller indent.
- // If this has been implemented, a lot of hacky solutions in i.e. ArrayInit and FunctionCall can be removed
const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base);
- var skip_first_indent = true;
- if (tree.token_ids[multiline_str_literal.firstToken() - 1] != .LineComment) {
- try stream.print("\n", .{});
- skip_first_indent = false;
- }
-
- for (multiline_str_literal.lines()) |t| {
- if (!skip_first_indent) {
- try stream.writeByteNTimes(' ', indent + indent_delta);
+ {
+ const locked_indents = ais.lockOneShotIndent();
+ defer {
+ var i: u8 = 0;
+ while (i < locked_indents) : (i += 1) ais.popIndent();
}
- try renderToken(tree, stream, t, indent, start_col, Space.None);
- skip_first_indent = false;
+ try ais.maybeInsertNewline();
+
+ for (multiline_str_literal.lines()) |t| try renderToken(tree, ais, t, Space.None);
}
- try stream.writeByteNTimes(' ', indent);
},
.BuiltinCall => {
@@ -1480,9 +1424,9 @@ fn renderExpression(
// TODO remove after 0.7.0 release
if (mem.eql(u8, tree.tokenSlice(builtin_call.builtin_token), "@OpaqueType"))
- return stream.writeAll("@Type(.Opaque)");
+ return ais.writer().writeAll("@Type(.Opaque)");
- try renderToken(tree, stream, builtin_call.builtin_token, indent, start_col, Space.None); // @name
+ try renderToken(tree, ais, builtin_call.builtin_token, Space.None); // @name
const src_params_trailing_comma = blk: {
if (builtin_call.params_len < 2) break :blk false;
@@ -1494,31 +1438,30 @@ fn renderExpression(
const lparen = tree.nextToken(builtin_call.builtin_token);
if (!src_params_trailing_comma) {
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, lparen, Space.None); // (
// render all on one line, no trailing comma
const params = builtin_call.params();
for (params) |param_node, i| {
- try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.None);
+ try renderExpression(allocator, ais, tree, param_node, Space.None);
if (i + 1 < params.len) {
const comma_token = tree.nextToken(param_node.lastToken());
- try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
+ try renderToken(tree, ais, comma_token, Space.Space); // ,
}
}
} else {
// one param per line
- const new_indent = indent + indent_delta;
- try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // (
+ ais.pushIndent();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lparen, Space.Newline); // (
for (builtin_call.params()) |param_node| {
- try stream.writeByteNTimes(' ', new_indent);
- try renderExpression(allocator, stream, tree, indent, start_col, param_node, Space.Comma);
+ try renderExpression(allocator, ais, tree, param_node, Space.Comma);
}
- try stream.writeByteNTimes(' ', indent);
}
- return renderToken(tree, stream, builtin_call.rparen_token, indent, start_col, space); // )
+ return renderToken(tree, ais, builtin_call.rparen_token, space); // )
},
.FnProto => {
@@ -1528,24 +1471,24 @@ fn renderExpression(
const visib_token = tree.token_ids[visib_token_index];
assert(visib_token == .Keyword_pub or visib_token == .Keyword_export);
- try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub
+ try renderToken(tree, ais, visib_token_index, Space.Space); // pub
}
if (fn_proto.getExternExportInlineToken()) |extern_export_inline_token| {
if (fn_proto.getIsExternPrototype() == null)
- try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export/inline
+ try renderToken(tree, ais, extern_export_inline_token, Space.Space); // extern/export/inline
}
if (fn_proto.getLibName()) |lib_name| {
- try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space);
+ try renderExpression(allocator, ais, tree, lib_name, Space.Space);
}
const lparen = if (fn_proto.getNameToken()) |name_token| blk: {
- try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
- try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name
+ try renderToken(tree, ais, fn_proto.fn_token, Space.Space); // fn
+ try renderToken(tree, ais, name_token, Space.None); // name
break :blk tree.nextToken(name_token);
} else blk: {
- try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
+ try renderToken(tree, ais, fn_proto.fn_token, Space.Space); // fn
break :blk tree.nextToken(fn_proto.fn_token);
};
assert(tree.token_ids[lparen] == .LParen);
@@ -1572,47 +1515,45 @@ fn renderExpression(
};
if (!src_params_trailing_comma) {
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, lparen, Space.None); // (
// render all on one line, no trailing comma
for (fn_proto.params()) |param_decl, i| {
- try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl, Space.None);
+ try renderParamDecl(allocator, ais, tree, param_decl, Space.None);
if (i + 1 < fn_proto.params_len or fn_proto.getVarArgsToken() != null) {
const comma = tree.nextToken(param_decl.lastToken());
- try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
+ try renderToken(tree, ais, comma, Space.Space); // ,
}
}
if (fn_proto.getVarArgsToken()) |var_args_token| {
- try renderToken(tree, stream, var_args_token, indent, start_col, Space.None);
+ try renderToken(tree, ais, var_args_token, Space.None);
}
} else {
// one param per line
- const new_indent = indent + indent_delta;
- try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // (
+ ais.pushIndent();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lparen, Space.Newline); // (
for (fn_proto.params()) |param_decl| {
- try stream.writeByteNTimes(' ', new_indent);
- try renderParamDecl(allocator, stream, tree, new_indent, start_col, param_decl, Space.Comma);
+ try renderParamDecl(allocator, ais, tree, param_decl, Space.Comma);
}
if (fn_proto.getVarArgsToken()) |var_args_token| {
- try stream.writeByteNTimes(' ', new_indent);
- try renderToken(tree, stream, var_args_token, new_indent, start_col, Space.Comma);
+ try renderToken(tree, ais, var_args_token, Space.Comma);
}
- try stream.writeByteNTimes(' ', indent);
}
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen, Space.Space); // )
if (fn_proto.getAlignExpr()) |align_expr| {
const align_rparen = tree.nextToken(align_expr.lastToken());
const align_lparen = tree.prevToken(align_expr.firstToken());
const align_kw = tree.prevToken(align_lparen);
- try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
- try renderToken(tree, stream, align_lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_expr, Space.None);
- try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, align_kw, Space.None); // align
+ try renderToken(tree, ais, align_lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, align_expr, Space.None);
+ try renderToken(tree, ais, align_rparen, Space.Space); // )
}
if (fn_proto.getSectionExpr()) |section_expr| {
@@ -1620,10 +1561,10 @@ fn renderExpression(
const section_lparen = tree.prevToken(section_expr.firstToken());
const section_kw = tree.prevToken(section_lparen);
- try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // section
- try renderToken(tree, stream, section_lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, section_expr, Space.None);
- try renderToken(tree, stream, section_rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, section_kw, Space.None); // section
+ try renderToken(tree, ais, section_lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, section_expr, Space.None);
+ try renderToken(tree, ais, section_rparen, Space.Space); // )
}
if (fn_proto.getCallconvExpr()) |callconv_expr| {
@@ -1631,23 +1572,23 @@ fn renderExpression(
const callconv_lparen = tree.prevToken(callconv_expr.firstToken());
const callconv_kw = tree.prevToken(callconv_lparen);
- try renderToken(tree, stream, callconv_kw, indent, start_col, Space.None); // callconv
- try renderToken(tree, stream, callconv_lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, callconv_expr, Space.None);
- try renderToken(tree, stream, callconv_rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, callconv_kw, Space.None); // callconv
+ try renderToken(tree, ais, callconv_lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, callconv_expr, Space.None);
+ try renderToken(tree, ais, callconv_rparen, Space.Space); // )
} else if (fn_proto.getIsExternPrototype() != null) {
- try stream.writeAll("callconv(.C) ");
+ try ais.writer().writeAll("callconv(.C) ");
} else if (fn_proto.getIsAsync() != null) {
- try stream.writeAll("callconv(.Async) ");
+ try ais.writer().writeAll("callconv(.Async) ");
}
switch (fn_proto.return_type) {
.Explicit => |node| {
- return renderExpression(allocator, stream, tree, indent, start_col, node, space);
+ return renderExpression(allocator, ais, tree, node, space);
},
.InferErrorSet => |node| {
- try renderToken(tree, stream, tree.prevToken(node.firstToken()), indent, start_col, Space.None); // !
- return renderExpression(allocator, stream, tree, indent, start_col, node, space);
+ try renderToken(tree, ais, tree.prevToken(node.firstToken()), Space.None); // !
+ return renderExpression(allocator, ais, tree, node, space);
},
.Invalid => unreachable,
}
@@ -1657,11 +1598,11 @@ fn renderExpression(
const anyframe_type = @fieldParentPtr(ast.Node.AnyFrameType, "base", base);
if (anyframe_type.result) |result| {
- try renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, Space.None); // anyframe
- try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // ->
- return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space);
+ try renderToken(tree, ais, anyframe_type.anyframe_token, Space.None); // anyframe
+ try renderToken(tree, ais, result.arrow_token, Space.None); // ->
+ return renderExpression(allocator, ais, tree, result.return_type, space);
} else {
- return renderToken(tree, stream, anyframe_type.anyframe_token, indent, start_col, space); // anyframe
+ return renderToken(tree, ais, anyframe_type.anyframe_token, space); // anyframe
}
},
@@ -1670,38 +1611,38 @@ fn renderExpression(
.Switch => {
const switch_node = @fieldParentPtr(ast.Node.Switch, "base", base);
- try renderToken(tree, stream, switch_node.switch_token, indent, start_col, Space.Space); // switch
- try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), indent, start_col, Space.None); // (
+ try renderToken(tree, ais, switch_node.switch_token, Space.Space); // switch
+ try renderToken(tree, ais, tree.nextToken(switch_node.switch_token), Space.None); // (
const rparen = tree.nextToken(switch_node.expr.lastToken());
const lbrace = tree.nextToken(rparen);
if (switch_node.cases_len == 0) {
- try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None);
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
- try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // {
- return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // }
+ try renderExpression(allocator, ais, tree, switch_node.expr, Space.None);
+ try renderToken(tree, ais, rparen, Space.Space); // )
+ try renderToken(tree, ais, lbrace, Space.None); // {
+ return renderToken(tree, ais, switch_node.rbrace, space); // }
}
- try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None);
-
- const new_indent = indent + indent_delta;
+ try renderExpression(allocator, ais, tree, switch_node.expr, Space.None);
+ try renderToken(tree, ais, rparen, Space.Space); // )
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
- try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); // {
+ {
+ ais.pushIndentNextLine();
+ defer ais.popIndent();
+ try renderToken(tree, ais, lbrace, Space.Newline); // {
- const cases = switch_node.cases();
- for (cases) |node, i| {
- try stream.writeByteNTimes(' ', new_indent);
- try renderExpression(allocator, stream, tree, new_indent, start_col, node, Space.Comma);
+ const cases = switch_node.cases();
+ for (cases) |node, i| {
+ try renderExpression(allocator, ais, tree, node, Space.Comma);
- if (i + 1 < cases.len) {
- try renderExtraNewline(tree, stream, start_col, cases[i + 1]);
+ if (i + 1 < cases.len) {
+ try renderExtraNewline(tree, ais, cases[i + 1]);
+ }
}
}
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // }
+ return renderToken(tree, ais, switch_node.rbrace, space); // }
},
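
For reference, the Switch branch above renders any non-empty switch with the `{` kept on the switch line and each prong one level deeper on its own line, popping the indent before the `}`. A small hypothetical example of that output shape follows (invented code, not taken from the repository's tests):

const std = @import("std");

// Hypothetical sample of the shape this branch produces: one prong per line
// inside an indented block.
fn classify(x: u2) []const u8 {
    return switch (x) {
        0 => "zero",
        1 => "one",
        else => "several",
    };
}

test "sample switch compiles in the rendered shape" {
    std.debug.assert(std.mem.eql(u8, classify(0), "zero"));
}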
.SwitchCase => {
@@ -1718,43 +1659,41 @@ fn renderExpression(
const items = switch_case.items();
for (items) |node, i| {
if (i + 1 < items.len) {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);
+ try renderExpression(allocator, ais, tree, node, Space.None);
const comma_token = tree.nextToken(node.lastToken());
- try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
- try renderExtraNewline(tree, stream, start_col, items[i + 1]);
+ try renderToken(tree, ais, comma_token, Space.Space); // ,
+ try renderExtraNewline(tree, ais, items[i + 1]);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Space);
+ try renderExpression(allocator, ais, tree, node, Space.Space);
}
}
} else {
const items = switch_case.items();
for (items) |node, i| {
if (i + 1 < items.len) {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);
+ try renderExpression(allocator, ais, tree, node, Space.None);
const comma_token = tree.nextToken(node.lastToken());
- try renderToken(tree, stream, comma_token, indent, start_col, Space.Newline); // ,
- try renderExtraNewline(tree, stream, start_col, items[i + 1]);
- try stream.writeByteNTimes(' ', indent);
+ try renderToken(tree, ais, comma_token, Space.Newline); // ,
+ try renderExtraNewline(tree, ais, items[i + 1]);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, node, Space.Comma);
- try stream.writeByteNTimes(' ', indent);
+ try renderExpression(allocator, ais, tree, node, Space.Comma);
}
}
}
- try renderToken(tree, stream, switch_case.arrow_token, indent, start_col, Space.Space); // =>
+ try renderToken(tree, ais, switch_case.arrow_token, Space.Space); // =>
if (switch_case.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, switch_case.expr, space);
+ return renderExpression(allocator, ais, tree, switch_case.expr, space);
},
.SwitchElse => {
const switch_else = @fieldParentPtr(ast.Node.SwitchElse, "base", base);
- return renderToken(tree, stream, switch_else.token, indent, start_col, space);
+ return renderToken(tree, ais, switch_else.token, space);
},
.Else => {
const else_node = @fieldParentPtr(ast.Node.Else, "base", base);
@@ -1763,37 +1702,37 @@ fn renderExpression(
const same_line = body_is_block or tree.tokensOnSameLine(else_node.else_token, else_node.body.lastToken());
const after_else_space = if (same_line or else_node.payload != null) Space.Space else Space.Newline;
- try renderToken(tree, stream, else_node.else_token, indent, start_col, after_else_space);
+ try renderToken(tree, ais, else_node.else_token, after_else_space);
if (else_node.payload) |payload| {
const payload_space = if (same_line) Space.Space else Space.Newline;
- try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space);
+ try renderExpression(allocator, ais, tree, payload, payload_space);
}
if (same_line) {
- return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space);
+ return renderExpression(allocator, ais, tree, else_node.body, space);
+ } else {
+ ais.pushIndent();
+ defer ais.popIndent();
+ return renderExpression(allocator, ais, tree, else_node.body, space);
}
-
- try stream.writeByteNTimes(' ', indent + indent_delta);
- start_col.* = indent + indent_delta;
- return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space);
},
.While => {
const while_node = @fieldParentPtr(ast.Node.While, "base", base);
if (while_node.label) |label| {
- try renderToken(tree, stream, label, indent, start_col, Space.None); // label
- try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // :
+ try renderToken(tree, ais, label, Space.None); // label
+ try renderToken(tree, ais, tree.nextToken(label), Space.Space); // :
}
if (while_node.inline_token) |inline_token| {
- try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline
+ try renderToken(tree, ais, inline_token, Space.Space); // inline
}
- try renderToken(tree, stream, while_node.while_token, indent, start_col, Space.Space); // while
- try renderToken(tree, stream, tree.nextToken(while_node.while_token), indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, while_node.condition, Space.None);
+ try renderToken(tree, ais, while_node.while_token, Space.Space); // while
+ try renderToken(tree, ais, tree.nextToken(while_node.while_token), Space.None); // (
+ try renderExpression(allocator, ais, tree, while_node.condition, Space.None);
const cond_rparen = tree.nextToken(while_node.condition.lastToken());
@@ -1815,12 +1754,12 @@ fn renderExpression(
{
const rparen_space = if (while_node.payload != null or while_node.continue_expr != null) Space.Space else block_start_space;
- try renderToken(tree, stream, cond_rparen, indent, start_col, rparen_space); // )
+ try renderToken(tree, ais, cond_rparen, rparen_space); // )
}
if (while_node.payload) |payload| {
- const payload_space = if (while_node.continue_expr != null) Space.Space else block_start_space;
- try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space);
+ const payload_space = Space.Space; //if (while_node.continue_expr != null) Space.Space else block_start_space;
+ try renderExpression(allocator, ais, tree, payload, payload_space);
}
if (while_node.continue_expr) |continue_expr| {
@@ -1828,29 +1767,22 @@ fn renderExpression(
const lparen = tree.prevToken(continue_expr.firstToken());
const colon = tree.prevToken(lparen);
- try renderToken(tree, stream, colon, indent, start_col, Space.Space); // :
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, colon, Space.Space); // :
+ try renderToken(tree, ais, lparen, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, continue_expr, Space.None);
+ try renderExpression(allocator, ais, tree, continue_expr, Space.None);
- try renderToken(tree, stream, rparen, indent, start_col, block_start_space); // )
+ try renderToken(tree, ais, rparen, block_start_space); // )
}
- var new_indent = indent;
- if (block_start_space == Space.Newline) {
- new_indent += indent_delta;
- try stream.writeByteNTimes(' ', new_indent);
- start_col.* = new_indent;
+ {
+ if (!body_is_block) ais.pushIndent();
+ defer if (!body_is_block) ais.popIndent();
+ try renderExpression(allocator, ais, tree, while_node.body, after_body_space);
}
- try renderExpression(allocator, stream, tree, indent, start_col, while_node.body, after_body_space);
-
if (while_node.@"else") |@"else"| {
- if (after_body_space == Space.Newline) {
- try stream.writeByteNTimes(' ', indent);
- start_col.* = indent;
- }
- return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space);
+ return renderExpression(allocator, ais, tree, &@"else".base, space);
}
},
@@ -1858,17 +1790,17 @@ fn renderExpression(
const for_node = @fieldParentPtr(ast.Node.For, "base", base);
if (for_node.label) |label| {
- try renderToken(tree, stream, label, indent, start_col, Space.None); // label
- try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // :
+ try renderToken(tree, ais, label, Space.None); // label
+ try renderToken(tree, ais, tree.nextToken(label), Space.Space); // :
}
if (for_node.inline_token) |inline_token| {
- try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline
+ try renderToken(tree, ais, inline_token, Space.Space); // inline
}
- try renderToken(tree, stream, for_node.for_token, indent, start_col, Space.Space); // for
- try renderToken(tree, stream, tree.nextToken(for_node.for_token), indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, for_node.array_expr, Space.None);
+ try renderToken(tree, ais, for_node.for_token, Space.Space); // for
+ try renderToken(tree, ais, tree.nextToken(for_node.for_token), Space.None); // (
+ try renderExpression(allocator, ais, tree, for_node.array_expr, Space.None);
const rparen = tree.nextToken(for_node.array_expr.lastToken());
@@ -1876,10 +1808,10 @@ fn renderExpression(
const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken());
const body_on_same_line = body_is_block or src_one_line_to_body;
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, ais, rparen, Space.Space); // )
const space_after_payload = if (body_on_same_line) Space.Space else Space.Newline;
- try renderExpression(allocator, stream, tree, indent, start_col, for_node.payload, space_after_payload); // |x|
+ try renderExpression(allocator, ais, tree, for_node.payload, space_after_payload); // |x|
const space_after_body = blk: {
if (for_node.@"else") |@"else"| {
@@ -1894,13 +1826,14 @@ fn renderExpression(
}
};
- const body_indent = if (body_on_same_line) indent else indent + indent_delta;
- if (!body_on_same_line) try stream.writeByteNTimes(' ', body_indent);
- try renderExpression(allocator, stream, tree, body_indent, start_col, for_node.body, space_after_body); // { body }
+ {
+ if (!body_on_same_line) ais.pushIndent();
+ defer if (!body_on_same_line) ais.popIndent();
+ try renderExpression(allocator, ais, tree, for_node.body, space_after_body); // { body }
+ }
if (for_node.@"else") |@"else"| {
- if (space_after_body == Space.Newline) try stream.writeByteNTimes(' ', indent);
- return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space); // else
+ return renderExpression(allocator, ais, tree, &@"else".base, space); // else
}
},
@@ -1910,29 +1843,29 @@ fn renderExpression(
const lparen = tree.nextToken(if_node.if_token);
const rparen = tree.nextToken(if_node.condition.lastToken());
- try renderToken(tree, stream, if_node.if_token, indent, start_col, Space.Space); // if
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, ais, if_node.if_token, Space.Space); // if
+ try renderToken(tree, ais, lparen, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, if_node.condition, Space.None); // condition
+ try renderExpression(allocator, ais, tree, if_node.condition, Space.None); // condition
const body_is_if_block = if_node.body.tag == .If;
const body_is_block = nodeIsBlock(if_node.body);
if (body_is_if_block) {
- try renderExtraNewline(tree, stream, start_col, if_node.body);
+ try renderExtraNewline(tree, ais, if_node.body);
} else if (body_is_block) {
const after_rparen_space = if (if_node.payload == null) Space.BlockStart else Space.Space;
- try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // )
+ try renderToken(tree, ais, rparen, after_rparen_space); // )
if (if_node.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.BlockStart); // |x|
+ try renderExpression(allocator, ais, tree, payload, Space.BlockStart); // |x|
}
if (if_node.@"else") |@"else"| {
- try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.SpaceOrOutdent);
- return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space);
+ try renderExpression(allocator, ais, tree, if_node.body, Space.SpaceOrOutdent);
+ return renderExpression(allocator, ais, tree, &@"else".base, space);
} else {
- return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space);
+ return renderExpression(allocator, ais, tree, if_node.body, space);
}
}
@@ -1940,186 +1873,184 @@ fn renderExpression(
if (src_has_newline) {
const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space;
- try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // )
+ try renderToken(tree, ais, rparen, after_rparen_space); // )
if (if_node.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline);
+ try renderExpression(allocator, ais, tree, payload, Space.Newline);
}
- const new_indent = indent + indent_delta;
- try stream.writeByteNTimes(' ', new_indent);
-
if (if_node.@"else") |@"else"| {
const else_is_block = nodeIsBlock(@"else".body);
- try renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
+
+ {
+ ais.pushIndent();
+ defer ais.popIndent();
+ try renderExpression(allocator, ais, tree, if_node.body, Space.Newline);
+ }
if (else_is_block) {
- try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); // else
+ try renderToken(tree, ais, @"else".else_token, Space.Space); // else
if (@"else".payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space);
+ return renderExpression(allocator, ais, tree, @"else".body, space);
} else {
const after_else_space = if (@"else".payload == null) Space.Newline else Space.Space;
- try renderToken(tree, stream, @"else".else_token, indent, start_col, after_else_space); // else
+ try renderToken(tree, ais, @"else".else_token, after_else_space); // else
if (@"else".payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline);
+ try renderExpression(allocator, ais, tree, payload, Space.Newline);
}
- try stream.writeByteNTimes(' ', new_indent);
- return renderExpression(allocator, stream, tree, new_indent, start_col, @"else".body, space);
+ ais.pushIndent();
+ defer ais.popIndent();
+ return renderExpression(allocator, ais, tree, @"else".body, space);
}
} else {
- return renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, space);
+ ais.pushIndent();
+ defer ais.popIndent();
+ return renderExpression(allocator, ais, tree, if_node.body, space);
}
}
- try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ // Single line if statement
+
+ try renderToken(tree, ais, rparen, Space.Space); // )
if (if_node.payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
if (if_node.@"else") |@"else"| {
- try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.Space);
- try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space);
+ try renderExpression(allocator, ais, tree, if_node.body, Space.Space);
+ try renderToken(tree, ais, @"else".else_token, Space.Space);
if (@"else".payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ try renderExpression(allocator, ais, tree, payload, Space.Space);
}
- return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space);
+ return renderExpression(allocator, ais, tree, @"else".body, space);
} else {
- return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space);
+ return renderExpression(allocator, ais, tree, if_node.body, space);
}
},
.Asm => {
const asm_node = @fieldParentPtr(ast.Node.Asm, "base", base);
- try renderToken(tree, stream, asm_node.asm_token, indent, start_col, Space.Space); // asm
+ try renderToken(tree, ais, asm_node.asm_token, Space.Space); // asm
if (asm_node.volatile_token) |volatile_token| {
- try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
- try renderToken(tree, stream, tree.nextToken(volatile_token), indent, start_col, Space.None); // (
+ try renderToken(tree, ais, volatile_token, Space.Space); // volatile
+ try renderToken(tree, ais, tree.nextToken(volatile_token), Space.None); // (
} else {
- try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), indent, start_col, Space.None); // (
+ try renderToken(tree, ais, tree.nextToken(asm_node.asm_token), Space.None); // (
}
- if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
- try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.None);
- return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
- }
+ asmblk: {
+ ais.pushIndent();
+ defer ais.popIndent();
- try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.Newline);
+ if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
+ try renderExpression(allocator, ais, tree, asm_node.template, Space.None);
+ break :asmblk;
+ }
- const indent_once = indent + indent_delta;
+ try renderExpression(allocator, ais, tree, asm_node.template, Space.Newline);
- if (asm_node.template.tag == .MultilineStringLiteral) {
- // After rendering a multiline string literal the cursor is
- // already offset by indent
- try stream.writeByteNTimes(' ', indent_delta);
- } else {
- try stream.writeByteNTimes(' ', indent_once);
- }
+ ais.setIndentDelta(asm_indent_delta);
+ defer ais.setIndentDelta(indent_delta);
- const colon1 = tree.nextToken(asm_node.template.lastToken());
- const indent_extra = indent_once + 2;
+ const colon1 = tree.nextToken(asm_node.template.lastToken());
- const colon2 = if (asm_node.outputs.len == 0) blk: {
- try renderToken(tree, stream, colon1, indent, start_col, Space.Newline); // :
- try stream.writeByteNTimes(' ', indent_once);
+ const colon2 = if (asm_node.outputs.len == 0) blk: {
+ try renderToken(tree, ais, colon1, Space.Newline); // :
- break :blk tree.nextToken(colon1);
- } else blk: {
- try renderToken(tree, stream, colon1, indent, start_col, Space.Space); // :
-
- for (asm_node.outputs) |*asm_output, i| {
- if (i + 1 < asm_node.outputs.len) {
- const next_asm_output = asm_node.outputs[i + 1];
- try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.None);
-
- const comma = tree.prevToken(next_asm_output.firstToken());
- try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // ,
- try renderExtraNewlineToken(tree, stream, start_col, next_asm_output.firstToken());
-
- try stream.writeByteNTimes(' ', indent_extra);
- } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
- try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
- } else {
- try renderAsmOutput(allocator, stream, tree, indent_extra, start_col, asm_output, Space.Newline);
- try stream.writeByteNTimes(' ', indent_once);
- const comma_or_colon = tree.nextToken(asm_output.lastToken());
- break :blk switch (tree.token_ids[comma_or_colon]) {
- .Comma => tree.nextToken(comma_or_colon),
- else => comma_or_colon,
- };
- }
- }
- unreachable;
- };
+ break :blk tree.nextToken(colon1);
+ } else blk: {
+ try renderToken(tree, ais, colon1, Space.Space); // :
- const colon3 = if (asm_node.inputs.len == 0) blk: {
- try renderToken(tree, stream, colon2, indent, start_col, Space.Newline); // :
- try stream.writeByteNTimes(' ', indent_once);
+ ais.pushIndent();
+ defer ais.popIndent();
- break :blk tree.nextToken(colon2);
- } else blk: {
- try renderToken(tree, stream, colon2, indent, start_col, Space.Space); // :
-
- for (asm_node.inputs) |*asm_input, i| {
- if (i + 1 < asm_node.inputs.len) {
- const next_asm_input = &asm_node.inputs[i + 1];
- try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.None);
-
- const comma = tree.prevToken(next_asm_input.firstToken());
- try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // ,
- try renderExtraNewlineToken(tree, stream, start_col, next_asm_input.firstToken());
-
- try stream.writeByteNTimes(' ', indent_extra);
- } else if (asm_node.clobbers.len == 0) {
- try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); // )
- } else {
- try renderAsmInput(allocator, stream, tree, indent_extra, start_col, asm_input, Space.Newline);
- try stream.writeByteNTimes(' ', indent_once);
- const comma_or_colon = tree.nextToken(asm_input.lastToken());
- break :blk switch (tree.token_ids[comma_or_colon]) {
- .Comma => tree.nextToken(comma_or_colon),
- else => comma_or_colon,
- };
+ for (asm_node.outputs) |*asm_output, i| {
+ if (i + 1 < asm_node.outputs.len) {
+ const next_asm_output = asm_node.outputs[i + 1];
+ try renderAsmOutput(allocator, ais, tree, asm_output, Space.None);
+
+ const comma = tree.prevToken(next_asm_output.firstToken());
+ try renderToken(tree, ais, comma, Space.Newline); // ,
+ try renderExtraNewlineToken(tree, ais, next_asm_output.firstToken());
+ } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
+ try renderAsmOutput(allocator, ais, tree, asm_output, Space.Newline);
+ break :asmblk;
+ } else {
+ try renderAsmOutput(allocator, ais, tree, asm_output, Space.Newline);
+ const comma_or_colon = tree.nextToken(asm_output.lastToken());
+ break :blk switch (tree.token_ids[comma_or_colon]) {
+ .Comma => tree.nextToken(comma_or_colon),
+ else => comma_or_colon,
+ };
+ }
}
- }
- unreachable;
- };
+ unreachable;
+ };
- try renderToken(tree, stream, colon3, indent, start_col, Space.Space); // :
+ const colon3 = if (asm_node.inputs.len == 0) blk: {
+ try renderToken(tree, ais, colon2, Space.Newline); // :
+ break :blk tree.nextToken(colon2);
+ } else blk: {
+ try renderToken(tree, ais, colon2, Space.Space); // :
+ ais.pushIndent();
+ defer ais.popIndent();
+ for (asm_node.inputs) |*asm_input, i| {
+ if (i + 1 < asm_node.inputs.len) {
+ const next_asm_input = &asm_node.inputs[i + 1];
+ try renderAsmInput(allocator, ais, tree, asm_input, Space.None);
+
+ const comma = tree.prevToken(next_asm_input.firstToken());
+ try renderToken(tree, ais, comma, Space.Newline); // ,
+ try renderExtraNewlineToken(tree, ais, next_asm_input.firstToken());
+ } else if (asm_node.clobbers.len == 0) {
+ try renderAsmInput(allocator, ais, tree, asm_input, Space.Newline);
+ break :asmblk;
+ } else {
+ try renderAsmInput(allocator, ais, tree, asm_input, Space.Newline);
+ const comma_or_colon = tree.nextToken(asm_input.lastToken());
+ break :blk switch (tree.token_ids[comma_or_colon]) {
+ .Comma => tree.nextToken(comma_or_colon),
+ else => comma_or_colon,
+ };
+ }
+ }
+ unreachable;
+ };
- for (asm_node.clobbers) |clobber_node, i| {
- if (i + 1 >= asm_node.clobbers.len) {
- try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
- return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
- } else {
- try renderExpression(allocator, stream, tree, indent_extra, start_col, clobber_node, Space.None);
- const comma = tree.nextToken(clobber_node.lastToken());
- try renderToken(tree, stream, comma, indent_once, start_col, Space.Space); // ,
+ try renderToken(tree, ais, colon3, Space.Space); // :
+ ais.pushIndent();
+ defer ais.popIndent();
+ for (asm_node.clobbers) |clobber_node, i| {
+ if (i + 1 >= asm_node.clobbers.len) {
+ try renderExpression(allocator, ais, tree, clobber_node, Space.Newline);
+ break :asmblk;
+ } else {
+ try renderExpression(allocator, ais, tree, clobber_node, Space.None);
+ const comma = tree.nextToken(clobber_node.lastToken());
+ try renderToken(tree, ais, comma, Space.Space); // ,
+ }
}
}
+
+ return renderToken(tree, ais, asm_node.rparen, space);
},
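
The Asm branch above is also restructured around a labeled block: the early `return renderToken(..., asm_node.rparen, ...)` calls become `break :asmblk`, so the closing `)` is rendered exactly once after the block. A minimal stand-alone sketch of that control-flow shape; renderAsmLike and epilogue_runs are invented names used only to illustrate the idiom.

const std = @import("std");

var epilogue_runs: usize = 0;

fn renderAsmLike(has_operands: bool) void {
    asmblk: {
        if (!has_operands) break :asmblk;
        // ... operands and clobbers would be rendered here ...
    }
    // Shared epilogue: reached once no matter which path broke out above.
    epilogue_runs += 1;
}

test "every path reaches the shared epilogue exactly once" {
    renderAsmLike(false);
    renderAsmLike(true);
    std.debug.assert(epilogue_runs == 2);
}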
.EnumLiteral => {
const enum_literal = @fieldParentPtr(ast.Node.EnumLiteral, "base", base);
- try renderToken(tree, stream, enum_literal.dot, indent, start_col, Space.None); // .
- return renderToken(tree, stream, enum_literal.name, indent, start_col, space); // name
+ try renderToken(tree, ais, enum_literal.dot, Space.None); // .
+ return renderToken(tree, ais, enum_literal.name, space); // name
},
.ContainerField,
@@ -2133,118 +2064,115 @@ fn renderExpression(
fn renderArrayType(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
lbracket: ast.TokenIndex,
rhs: *ast.Node,
len_expr: *ast.Node,
opt_sentinel: ?*ast.Node,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
const rbracket = tree.nextToken(if (opt_sentinel) |sentinel|
sentinel.lastToken()
else
len_expr.lastToken());
- try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
-
const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
- const new_indent = if (ends_with_comment) indent + indent_delta else indent;
const new_space = if (ends_with_comment) Space.Newline else Space.None;
- try renderExpression(allocator, stream, tree, new_indent, start_col, len_expr, new_space);
- if (starts_with_comment) {
- try stream.writeByte('\n');
- }
- if (ends_with_comment or starts_with_comment) {
- try stream.writeByteNTimes(' ', indent);
- }
- if (opt_sentinel) |sentinel| {
- const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
+ {
+ const do_indent = (starts_with_comment or ends_with_comment);
+ if (do_indent) ais.pushIndent();
+ defer if (do_indent) ais.popIndent();
+
+ try renderToken(tree, ais, lbracket, Space.None); // [
+ try renderExpression(allocator, ais, tree, len_expr, new_space);
+
+ if (starts_with_comment) {
+ try ais.maybeInsertNewline();
+ }
+ if (opt_sentinel) |sentinel| {
+ const colon_token = tree.prevToken(sentinel.firstToken());
+ try renderToken(tree, ais, colon_token, Space.None); // :
+ try renderExpression(allocator, ais, tree, sentinel, Space.None);
+ }
+ if (starts_with_comment) {
+ try ais.maybeInsertNewline();
+ }
}
- try renderToken(tree, stream, rbracket, indent, start_col, Space.None); // ]
+ try renderToken(tree, ais, rbracket, Space.None); // ]
- return renderExpression(allocator, stream, tree, indent, start_col, rhs, space);
+ return renderExpression(allocator, ais, tree, rhs, space);
}
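
renderArrayType above pairs a conditional push with `defer if (do_indent) ais.popIndent()`; this stays balanced because `do_indent` is a const, so the defer sees the same value at scope exit. A toy version of the same idiom, with invented helpers:

const std = @import("std");

var depth: usize = 0;

fn push() void {
    depth += 1;
}

fn pop() void {
    depth -= 1;
}

fn renderMaybeIndented(do_indent: bool) void {
    if (do_indent) push();
    defer if (do_indent) pop();
    // ... render children at the (possibly) deeper level ...
}

test "conditional push and pop stay balanced" {
    renderMaybeIndented(true);
    renderMaybeIndented(false);
    std.debug.assert(depth == 0);
}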
fn renderAsmOutput(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
asm_output: *const ast.Node.Asm.Output,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
- try stream.writeAll("[");
- try renderExpression(allocator, stream, tree, indent, start_col, asm_output.symbolic_name, Space.None);
- try stream.writeAll("] ");
- try renderExpression(allocator, stream, tree, indent, start_col, asm_output.constraint, Space.None);
- try stream.writeAll(" (");
+) (@TypeOf(ais.*).Error || Error)!void {
+ try ais.writer().writeAll("[");
+ try renderExpression(allocator, ais, tree, asm_output.symbolic_name, Space.None);
+ try ais.writer().writeAll("] ");
+ try renderExpression(allocator, ais, tree, asm_output.constraint, Space.None);
+ try ais.writer().writeAll(" (");
switch (asm_output.kind) {
ast.Node.Asm.Output.Kind.Variable => |variable_name| {
- try renderExpression(allocator, stream, tree, indent, start_col, &variable_name.base, Space.None);
+ try renderExpression(allocator, ais, tree, &variable_name.base, Space.None);
},
ast.Node.Asm.Output.Kind.Return => |return_type| {
- try stream.writeAll("-> ");
- try renderExpression(allocator, stream, tree, indent, start_col, return_type, Space.None);
+ try ais.writer().writeAll("-> ");
+ try renderExpression(allocator, ais, tree, return_type, Space.None);
},
}
- return renderToken(tree, stream, asm_output.lastToken(), indent, start_col, space); // )
+ return renderToken(tree, ais, asm_output.lastToken(), space); // )
}
fn renderAsmInput(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
asm_input: *const ast.Node.Asm.Input,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
- try stream.writeAll("[");
- try renderExpression(allocator, stream, tree, indent, start_col, asm_input.symbolic_name, Space.None);
- try stream.writeAll("] ");
- try renderExpression(allocator, stream, tree, indent, start_col, asm_input.constraint, Space.None);
- try stream.writeAll(" (");
- try renderExpression(allocator, stream, tree, indent, start_col, asm_input.expr, Space.None);
- return renderToken(tree, stream, asm_input.lastToken(), indent, start_col, space); // )
+) (@TypeOf(ais.*).Error || Error)!void {
+ try ais.writer().writeAll("[");
+ try renderExpression(allocator, ais, tree, asm_input.symbolic_name, Space.None);
+ try ais.writer().writeAll("] ");
+ try renderExpression(allocator, ais, tree, asm_input.constraint, Space.None);
+ try ais.writer().writeAll(" (");
+ try renderExpression(allocator, ais, tree, asm_input.expr, Space.None);
+ return renderToken(tree, ais, asm_input.lastToken(), space); // )
}
fn renderVarDecl(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
var_decl: *ast.Node.VarDecl,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
if (var_decl.getVisibToken()) |visib_token| {
- try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
+ try renderToken(tree, ais, visib_token, Space.Space); // pub
}
if (var_decl.getExternExportToken()) |extern_export_token| {
- try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern
+ try renderToken(tree, ais, extern_export_token, Space.Space); // extern
if (var_decl.getLibName()) |lib_name| {
- try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib"
+ try renderExpression(allocator, ais, tree, lib_name, Space.Space); // "lib"
}
}
if (var_decl.getComptimeToken()) |comptime_token| {
- try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime
+ try renderToken(tree, ais, comptime_token, Space.Space); // comptime
}
if (var_decl.getThreadLocalToken()) |thread_local_token| {
- try renderToken(tree, stream, thread_local_token, indent, start_col, Space.Space); // threadlocal
+ try renderToken(tree, ais, thread_local_token, Space.Space); // threadlocal
}
- try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var
+ try renderToken(tree, ais, var_decl.mut_token, Space.Space); // var
const name_space = if (var_decl.getTypeNode() == null and
(var_decl.getAlignNode() != null or
@@ -2253,95 +2181,92 @@ fn renderVarDecl(
Space.Space
else
Space.None;
- try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space);
+ try renderToken(tree, ais, var_decl.name_token, name_space);
if (var_decl.getTypeNode()) |type_node| {
- try renderToken(tree, stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space);
+ try renderToken(tree, ais, tree.nextToken(var_decl.name_token), Space.Space);
const s = if (var_decl.getAlignNode() != null or
var_decl.getSectionNode() != null or
var_decl.getInitNode() != null) Space.Space else Space.None;
- try renderExpression(allocator, stream, tree, indent, start_col, type_node, s);
+ try renderExpression(allocator, ais, tree, type_node, s);
}
if (var_decl.getAlignNode()) |align_node| {
const lparen = tree.prevToken(align_node.firstToken());
const align_kw = tree.prevToken(lparen);
const rparen = tree.nextToken(align_node.lastToken());
- try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None);
+ try renderToken(tree, ais, align_kw, Space.None); // align
+ try renderToken(tree, ais, lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, align_node, Space.None);
const s = if (var_decl.getSectionNode() != null or var_decl.getInitNode() != null) Space.Space else Space.None;
- try renderToken(tree, stream, rparen, indent, start_col, s); // )
+ try renderToken(tree, ais, rparen, s); // )
}
if (var_decl.getSectionNode()) |section_node| {
const lparen = tree.prevToken(section_node.firstToken());
const section_kw = tree.prevToken(lparen);
const rparen = tree.nextToken(section_node.lastToken());
- try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // linksection
- try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, section_node, Space.None);
+ try renderToken(tree, ais, section_kw, Space.None); // linksection
+ try renderToken(tree, ais, lparen, Space.None); // (
+ try renderExpression(allocator, ais, tree, section_node, Space.None);
const s = if (var_decl.getInitNode() != null) Space.Space else Space.None;
- try renderToken(tree, stream, rparen, indent, start_col, s); // )
+ try renderToken(tree, ais, rparen, s); // )
}
if (var_decl.getInitNode()) |init_node| {
const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space;
- try renderToken(tree, stream, var_decl.getEqToken().?, indent, start_col, s); // =
- try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None);
+ try renderToken(tree, ais, var_decl.getEqToken().?, s); // =
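+ // One-shot indent: if the initializer wraps onto a new line, it gets one extra indent level, applied only once.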
+ ais.pushIndentOneShot();
+ try renderExpression(allocator, ais, tree, init_node, Space.None);
}
- try renderToken(tree, stream, var_decl.semicolon_token, indent, start_col, Space.Newline);
+ try renderToken(tree, ais, var_decl.semicolon_token, Space.Newline);
}
fn renderParamDecl(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
param_decl: ast.Node.FnProto.ParamDecl,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
- try renderDocComments(tree, stream, param_decl, param_decl.doc_comments, indent, start_col);
+) (@TypeOf(ais.*).Error || Error)!void {
+ try renderDocComments(tree, ais, param_decl, param_decl.doc_comments);
if (param_decl.comptime_token) |comptime_token| {
- try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, comptime_token, Space.Space);
}
if (param_decl.noalias_token) |noalias_token| {
- try renderToken(tree, stream, noalias_token, indent, start_col, Space.Space);
+ try renderToken(tree, ais, noalias_token, Space.Space);
}
if (param_decl.name_token) |name_token| {
- try renderToken(tree, stream, name_token, indent, start_col, Space.None);
- try renderToken(tree, stream, tree.nextToken(name_token), indent, start_col, Space.Space); // :
+ try renderToken(tree, ais, name_token, Space.None);
+ try renderToken(tree, ais, tree.nextToken(name_token), Space.Space); // :
}
switch (param_decl.param_type) {
- .any_type, .type_expr => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, space),
+ .any_type, .type_expr => |node| try renderExpression(allocator, ais, tree, node, space),
}
}
fn renderStatement(
allocator: *mem.Allocator,
- stream: anytype,
+ ais: anytype,
tree: *ast.Tree,
- indent: usize,
- start_col: *usize,
base: *ast.Node,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
switch (base.tag) {
.VarDecl => {
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
- try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
+ try renderVarDecl(allocator, ais, tree, var_decl);
},
else => {
if (base.requireSemiColon()) {
- try renderExpression(allocator, stream, tree, indent, start_col, base, Space.None);
+ try renderExpression(allocator, ais, tree, base, Space.None);
const semicolon_index = tree.nextToken(base.lastToken());
assert(tree.token_ids[semicolon_index] == .Semicolon);
- try renderToken(tree, stream, semicolon_index, indent, start_col, Space.Newline);
+ try renderToken(tree, ais, semicolon_index, Space.Newline);
} else {
- try renderExpression(allocator, stream, tree, indent, start_col, base, Space.Newline);
+ try renderExpression(allocator, ais, tree, base, Space.Newline);
}
},
}
@@ -2360,24 +2285,19 @@ const Space = enum {
fn renderTokenOffset(
tree: *ast.Tree,
- stream: anytype,
+ ais: anytype,
token_index: ast.TokenIndex,
- indent: usize,
- start_col: *usize,
space: Space,
token_skip_bytes: usize,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
if (space == Space.BlockStart) {
- if (start_col.* < indent + indent_delta)
- return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
- try renderToken(tree, stream, token_index, indent, start_col, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
- start_col.* = indent;
- return;
+ // If placing the lbrace on the current line would cause an ugly gap, then put the lbrace on the next line
+ const new_space = if (ais.isLineOverIndented()) Space.Newline else Space.Space;
+ return renderToken(tree, ais, token_index, new_space);
}
var token_loc = tree.token_locs[token_index];
- try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " "));
+ try ais.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(token_loc)[token_skip_bytes..], " "));
if (space == Space.NoComment)
return;
@@ -2386,20 +2306,20 @@ fn renderTokenOffset(
var next_token_loc = tree.token_locs[token_index + 1];
if (space == Space.Comma) switch (next_token_id) {
- .Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline),
+ .Comma => return renderToken(tree, ais, token_index + 1, Space.Newline),
.LineComment => {
- try stream.writeAll(", ");
- return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline);
+ try ais.writer().writeAll(", ");
+ return renderToken(tree, ais, token_index + 1, Space.Newline);
},
else => {
if (token_index + 2 < tree.token_ids.len and
tree.token_ids[token_index + 2] == .MultilineStringLiteralLine)
{
- try stream.writeAll(",");
+ try ais.writer().writeAll(",");
return;
} else {
- try stream.writeAll(",\n");
- start_col.* = 0;
+ try ais.writer().writeAll(",");
+ try ais.insertNewline();
return;
}
},
@@ -2423,15 +2343,14 @@ fn renderTokenOffset(
if (next_token_id == .MultilineStringLiteralLine) {
return;
} else {
- try stream.writeAll("\n");
- start_col.* = 0;
+ try ais.insertNewline();
return;
}
},
Space.Space, Space.SpaceOrOutdent => {
if (next_token_id == .MultilineStringLiteralLine)
return;
- try stream.writeByte(' ');
+ try ais.writer().writeByte(' ');
return;
},
Space.NoComment, Space.Comma, Space.BlockStart => unreachable,
@@ -2448,8 +2367,7 @@ fn renderTokenOffset(
next_token_id = tree.token_ids[token_index + offset];
next_token_loc = tree.token_locs[token_index + offset];
if (next_token_id != .LineComment) {
- try stream.writeByte('\n');
- start_col.* = 0;
+ try ais.insertNewline();
return;
}
},
@@ -2462,7 +2380,7 @@ fn renderTokenOffset(
var loc = tree.tokenLocationLoc(token_loc.end, next_token_loc);
if (loc.line == 0) {
- try stream.print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")});
+ try ais.writer().print(" {}", .{mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " ")});
offset = 2;
token_loc = next_token_loc;
next_token_loc = tree.token_locs[token_index + offset];
@@ -2470,26 +2388,16 @@ fn renderTokenOffset(
if (next_token_id != .LineComment) {
switch (space) {
Space.None, Space.Space => {
- try stream.writeByte('\n');
- const after_comment_token = tree.token_ids[token_index + offset];
- const next_line_indent = switch (after_comment_token) {
- .RParen, .RBrace, .RBracket => indent,
- else => indent + indent_delta,
- };
- try stream.writeByteNTimes(' ', next_line_indent);
- start_col.* = next_line_indent;
+ try ais.insertNewline();
},
Space.SpaceOrOutdent => {
- try stream.writeByte('\n');
- try stream.writeByteNTimes(' ', indent);
- start_col.* = indent;
+ try ais.insertNewline();
},
Space.Newline => {
if (next_token_id == .MultilineStringLiteralLine) {
return;
} else {
- try stream.writeAll("\n");
- start_col.* = 0;
+ try ais.insertNewline();
return;
}
},
@@ -2505,10 +2413,9 @@ fn renderTokenOffset(
// translate-c doesn't generate correct newlines
// in generated code (loc.line == 0) so treat that case
// as though there was meant to be a newline between the tokens
- const newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2);
- try stream.writeByteNTimes('\n', newline_count);
- try stream.writeByteNTimes(' ', indent);
- try stream.writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " "));
+ var newline_count = if (loc.line <= 1) @as(u8, 1) else @as(u8, 2);
+ while (newline_count > 0) : (newline_count -= 1) try ais.insertNewline();
+ try ais.writer().writeAll(mem.trimRight(u8, tree.tokenSliceLoc(next_token_loc), " "));
offset += 1;
token_loc = next_token_loc;
@@ -2520,32 +2427,15 @@ fn renderTokenOffset(
if (next_token_id == .MultilineStringLiteralLine) {
return;
} else {
- try stream.writeAll("\n");
- start_col.* = 0;
+ try ais.insertNewline();
return;
}
},
Space.None, Space.Space => {
- try stream.writeByte('\n');
-
- const after_comment_token = tree.token_ids[token_index + offset];
- const next_line_indent = switch (after_comment_token) {
- .RParen, .RBrace, .RBracket => blk: {
- if (indent > indent_delta) {
- break :blk indent - indent_delta;
- } else {
- break :blk 0;
- }
- },
- else => indent,
- };
- try stream.writeByteNTimes(' ', next_line_indent);
- start_col.* = next_line_indent;
+ try ais.insertNewline();
},
Space.SpaceOrOutdent => {
- try stream.writeByte('\n');
- try stream.writeByteNTimes(' ', indent);
- start_col.* = indent;
+ try ais.insertNewline();
},
Space.NoNewline => {},
Space.NoComment, Space.Comma, Space.BlockStart => unreachable,
@@ -2558,46 +2448,38 @@ fn renderTokenOffset(
fn renderToken(
tree: *ast.Tree,
- stream: anytype,
+ ais: anytype,
token_index: ast.TokenIndex,
- indent: usize,
- start_col: *usize,
space: Space,
-) (@TypeOf(stream).Error || Error)!void {
- return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0);
+) (@TypeOf(ais.*).Error || Error)!void {
+ return renderTokenOffset(tree, ais, token_index, space, 0);
}
fn renderDocComments(
tree: *ast.Tree,
- stream: anytype,
+ ais: anytype,
node: anytype,
doc_comments: ?*ast.Node.DocComment,
- indent: usize,
- start_col: *usize,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
const comment = doc_comments orelse return;
- return renderDocCommentsToken(tree, stream, comment, node.firstToken(), indent, start_col);
+ return renderDocCommentsToken(tree, ais, comment, node.firstToken());
}
fn renderDocCommentsToken(
tree: *ast.Tree,
- stream: anytype,
+ ais: anytype,
comment: *ast.Node.DocComment,
first_token: ast.TokenIndex,
- indent: usize,
- start_col: *usize,
-) (@TypeOf(stream).Error || Error)!void {
+) (@TypeOf(ais.*).Error || Error)!void {
var tok_i = comment.first_line;
while (true) : (tok_i += 1) {
switch (tree.token_ids[tok_i]) {
.DocComment, .ContainerDocComment => {
if (comment.first_line < first_token) {
- try renderToken(tree, stream, tok_i, indent, start_col, Space.Newline);
- try stream.writeByteNTimes(' ', indent);
+ try renderToken(tree, ais, tok_i, Space.Newline);
} else {
- try renderToken(tree, stream, tok_i, indent, start_col, Space.NoComment);
- try stream.writeAll("\n");
- try stream.writeByteNTimes(' ', indent);
+ try renderToken(tree, ais, tok_i, Space.NoComment);
+ try ais.insertNewline();
}
},
.LineComment => continue,
@@ -2669,41 +2551,10 @@ fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
};
}
-/// A `std.io.OutStream` that returns whether the given character has been written to it.
-/// The contents are not written to anything.
-const FindByteOutStream = struct {
- byte_found: bool,
- byte: u8,
-
- pub const Error = error{};
- pub const OutStream = std.io.OutStream(*FindByteOutStream, Error, write);
-
- pub fn init(byte: u8) FindByteOutStream {
- return FindByteOutStream{
- .byte = byte,
- .byte_found = false,
- };
- }
-
- pub fn write(self: *FindByteOutStream, bytes: []const u8) Error!usize {
- if (self.byte_found) return bytes.len;
- self.byte_found = blk: {
- for (bytes) |b|
- if (b == self.byte) break :blk true;
- break :blk false;
- };
- return bytes.len;
- }
-
- pub fn outStream(self: *FindByteOutStream) OutStream {
- return .{ .context = self };
- }
-};
-
-fn copyFixingWhitespace(stream: anytype, slice: []const u8) @TypeOf(stream).Error!void {
+fn copyFixingWhitespace(ais: anytype, slice: []const u8) @TypeOf(ais.*).Error!void {
for (slice) |byte| switch (byte) {
- '\t' => try stream.writeAll(" "),
+ '\t' => try ais.writer().writeAll(" "),
'\r' => {},
- else => try stream.writeByte(byte),
+ else => try ais.writer().writeByte(byte),
};
}
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 47c7d23b35..86968c73b2 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -1175,6 +1175,7 @@ pub const Tokenizer = struct {
},
.num_dot_dec => switch (c) {
'.' => {
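+ // A second '.' means the digits form an integer literal followed by a range operator (e.g. `1..2`),
+ // so back up one byte and let `..` be tokenized on its own.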
+ result.id = .IntegerLiteral;
self.index -= 1;
state = .start;
break;
@@ -1183,7 +1184,6 @@ pub const Tokenizer = struct {
state = .float_exponent_unsigned;
},
'0'...'9' => {
- result.id = .FloatLiteral;
state = .float_fraction_dec;
},
else => {
@@ -1769,6 +1769,7 @@ test "tokenizer - number literals decimal" {
testTokenize("7", &[_]Token.Id{.IntegerLiteral});
testTokenize("8", &[_]Token.Id{.IntegerLiteral});
testTokenize("9", &[_]Token.Id{.IntegerLiteral});
+ testTokenize("1..", &[_]Token.Id{ .IntegerLiteral, .Ellipsis2 });
testTokenize("0a", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("9b", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("1z", &[_]Token.Id{ .Invalid, .Identifier });