aboutsummaryrefslogtreecommitdiff
path: root/lib/std
diff options
context:
space:
mode:
authorAnthony Arian <anthonyarian96@gmail.com>2020-07-20 10:25:54 +0100
committerAnthony Arian <anthonyarian96@gmail.com>2020-07-20 10:25:54 +0100
commit3658dd5e89cd16c011bdc52d334c1308f440157b (patch)
tree09564ab2db65acc4a52d82bccbf0eb572fbc865f /lib/std
parent68fe3e116d9c4bde67df990b8e0cbb3e70fc98b2 (diff)
parent596ca6cf70cf43c27e31bbcfc36bcdc70b13897a (diff)
downloadzig-3658dd5e89cd16c011bdc52d334c1308f440157b.tar.gz
zig-3658dd5e89cd16c011bdc52d334c1308f440157b.zip
Merge branch 'master' of https://github.com/ziglang/zig into 5002-fix-entrypoint-with-winmain
Diffstat (limited to 'lib/std')
-rw-r--r--lib/std/array_list.zig123
-rw-r--r--lib/std/array_list_sentineled.zig4
-rw-r--r--lib/std/atomic/queue.zig4
-rw-r--r--lib/std/buf_map.zig17
-rw-r--r--lib/std/buf_set.zig8
-rw-r--r--lib/std/build.zig40
-rw-r--r--lib/std/build/emit_raw.zig6
-rw-r--r--lib/std/builtin.zig45
-rw-r--r--lib/std/c.zig17
-rw-r--r--lib/std/c/ast.zig14
-rw-r--r--lib/std/c/darwin.zig1
-rw-r--r--lib/std/c/tokenizer.zig100
-rw-r--r--lib/std/cache_hash.zig4
-rw-r--r--lib/std/comptime_string_map.zig6
-rw-r--r--lib/std/crypto/benchmark.zig24
-rw-r--r--lib/std/crypto/test.zig2
-rw-r--r--lib/std/debug.zig70
-rw-r--r--lib/std/debug/leb128.zig352
-rw-r--r--lib/std/dwarf.zig20
-rw-r--r--lib/std/elf.zig5
-rw-r--r--lib/std/event/group.zig2
-rw-r--r--lib/std/fmt.zig485
-rw-r--r--lib/std/fs.zig91
-rw-r--r--lib/std/fs/file.zig66
-rw-r--r--lib/std/fs/path.zig4
-rw-r--r--lib/std/fs/test.zig313
-rw-r--r--lib/std/fs/wasi.zig88
-rw-r--r--lib/std/fs/watch.zig2
-rw-r--r--lib/std/hash/auto_hash.zig16
-rw-r--r--lib/std/hash/benchmark.zig10
-rw-r--r--lib/std/hash/cityhash.zig2
-rw-r--r--lib/std/hash/murmur.zig2
-rw-r--r--lib/std/hash_map.zig1101
-rw-r--r--lib/std/heap.zig612
-rw-r--r--lib/std/heap/arena_allocator.zig32
-rw-r--r--lib/std/heap/logging_allocator.zig63
-rw-r--r--lib/std/http/headers.zig170
-rw-r--r--lib/std/io/bit_reader.zig2
-rw-r--r--lib/std/io/bit_writer.zig4
-rw-r--r--lib/std/io/buffered_out_stream.zig2
-rw-r--r--lib/std/io/buffered_reader.zig2
-rw-r--r--lib/std/io/buffered_writer.zig2
-rw-r--r--lib/std/io/counting_writer.zig2
-rw-r--r--lib/std/io/fixed_buffer_stream.zig2
-rw-r--r--lib/std/io/multi_writer.zig2
-rw-r--r--lib/std/io/peek_stream.zig2
-rw-r--r--lib/std/io/reader.zig3
-rw-r--r--lib/std/io/serialization.zig62
-rw-r--r--lib/std/io/writer.zig2
-rw-r--r--lib/std/json.zig87
-rw-r--r--lib/std/json/write_stream.zig6
-rw-r--r--lib/std/log.zig202
-rw-r--r--lib/std/math.zig46
-rw-r--r--lib/std/math/acos.zig2
-rw-r--r--lib/std/math/acosh.zig2
-rw-r--r--lib/std/math/asin.zig2
-rw-r--r--lib/std/math/asinh.zig2
-rw-r--r--lib/std/math/atan.zig2
-rw-r--r--lib/std/math/atanh.zig2
-rw-r--r--lib/std/math/big/int.zig22
-rw-r--r--lib/std/math/big/rational.zig4
-rw-r--r--lib/std/math/cbrt.zig2
-rw-r--r--lib/std/math/ceil.zig45
-rw-r--r--lib/std/math/complex/abs.zig2
-rw-r--r--lib/std/math/complex/acos.zig2
-rw-r--r--lib/std/math/complex/acosh.zig2
-rw-r--r--lib/std/math/complex/arg.zig2
-rw-r--r--lib/std/math/complex/asin.zig2
-rw-r--r--lib/std/math/complex/asinh.zig2
-rw-r--r--lib/std/math/complex/atan.zig2
-rw-r--r--lib/std/math/complex/atanh.zig2
-rw-r--r--lib/std/math/complex/conj.zig2
-rw-r--r--lib/std/math/complex/cos.zig2
-rw-r--r--lib/std/math/complex/cosh.zig2
-rw-r--r--lib/std/math/complex/exp.zig2
-rw-r--r--lib/std/math/complex/ldexp.zig2
-rw-r--r--lib/std/math/complex/log.zig2
-rw-r--r--lib/std/math/complex/proj.zig2
-rw-r--r--lib/std/math/complex/sin.zig2
-rw-r--r--lib/std/math/complex/sinh.zig2
-rw-r--r--lib/std/math/complex/sqrt.zig2
-rw-r--r--lib/std/math/complex/tan.zig2
-rw-r--r--lib/std/math/complex/tanh.zig2
-rw-r--r--lib/std/math/cos.zig2
-rw-r--r--lib/std/math/cosh.zig2
-rw-r--r--lib/std/math/exp.zig2
-rw-r--r--lib/std/math/exp2.zig2
-rw-r--r--lib/std/math/expm1.zig2
-rw-r--r--lib/std/math/expo2.zig2
-rw-r--r--lib/std/math/fabs.zig2
-rw-r--r--lib/std/math/floor.zig45
-rw-r--r--lib/std/math/frexp.zig2
-rw-r--r--lib/std/math/ilogb.zig2
-rw-r--r--lib/std/math/isfinite.zig2
-rw-r--r--lib/std/math/isinf.zig6
-rw-r--r--lib/std/math/isnan.zig4
-rw-r--r--lib/std/math/isnormal.zig2
-rw-r--r--lib/std/math/ln.zig2
-rw-r--r--lib/std/math/log10.zig2
-rw-r--r--lib/std/math/log1p.zig2
-rw-r--r--lib/std/math/log2.zig2
-rw-r--r--lib/std/math/modf.zig2
-rw-r--r--lib/std/math/round.zig52
-rw-r--r--lib/std/math/scalbn.zig2
-rw-r--r--lib/std/math/signbit.zig2
-rw-r--r--lib/std/math/sin.zig2
-rw-r--r--lib/std/math/sinh.zig2
-rw-r--r--lib/std/math/sqrt.zig2
-rw-r--r--lib/std/math/tan.zig2
-rw-r--r--lib/std/math/tanh.zig2
-rw-r--r--lib/std/math/trunc.zig39
-rw-r--r--lib/std/mem.zig436
-rw-r--r--lib/std/meta.zig103
-rw-r--r--lib/std/meta/trailer_flags.zig145
-rw-r--r--lib/std/meta/trait.zig21
-rw-r--r--lib/std/net.zig8
-rw-r--r--lib/std/os.zig281
-rw-r--r--lib/std/os/test.zig170
-rw-r--r--lib/std/os/uefi.zig2
-rw-r--r--lib/std/os/windows.zig11
-rw-r--r--lib/std/os/windows/bits.zig1
-rw-r--r--lib/std/os/windows/ws2_32.zig18
-rw-r--r--lib/std/pdb.zig2
-rw-r--r--lib/std/priority_queue.zig2
-rw-r--r--lib/std/process.zig51
-rw-r--r--lib/std/progress.zig4
-rw-r--r--lib/std/segmented_list.zig4
-rw-r--r--lib/std/sort.zig38
-rw-r--r--lib/std/special/build_runner.zig4
-rw-r--r--lib/std/special/compiler_rt/clzsi2_test.zig2
-rw-r--r--lib/std/special/compiler_rt/int.zig2
-rw-r--r--lib/std/special/compiler_rt/udivmod.zig2
-rw-r--r--lib/std/special/test_runner.zig14
-rw-r--r--lib/std/start.zig2
-rw-r--r--lib/std/std.zig10
-rw-r--r--lib/std/target.zig67
-rw-r--r--lib/std/target/aarch64.zig45
-rw-r--r--lib/std/target/amdgpu.zig45
-rw-r--r--lib/std/target/arm.zig89
-rw-r--r--lib/std/target/avr.zig263
-rw-r--r--lib/std/target/bpf.zig11
-rw-r--r--lib/std/target/hexagon.zig13
-rw-r--r--lib/std/target/mips.zig25
-rw-r--r--lib/std/target/msp430.zig9
-rw-r--r--lib/std/target/nvptx.zig21
-rw-r--r--lib/std/target/powerpc.zig44
-rw-r--r--lib/std/target/riscv.zig10
-rw-r--r--lib/std/target/sparc.zig46
-rw-r--r--lib/std/target/systemz.zig19
-rw-r--r--lib/std/target/wasm.zig9
-rw-r--r--lib/std/target/x86.zig85
-rw-r--r--lib/std/testing.zig11
-rw-r--r--lib/std/testing/failing_allocator.zig41
-rw-r--r--lib/std/testing/leak_count_allocator.zig21
-rw-r--r--lib/std/thread.zig2
-rw-r--r--lib/std/unicode.zig41
-rw-r--r--lib/std/zig.zig38
-rw-r--r--lib/std/zig/ast.zig981
-rw-r--r--lib/std/zig/cross_target.zig6
-rw-r--r--lib/std/zig/parse.zig670
-rw-r--r--lib/std/zig/parser_test.zig61
-rw-r--r--lib/std/zig/render.zig630
-rw-r--r--lib/std/zig/string_literal.zig2
-rw-r--r--lib/std/zig/system.zig11
-rw-r--r--lib/std/zig/tokenizer.zig5
165 files changed, 5720 insertions, 3600 deletions
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index e096a65491..4d8cdc200c 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -53,7 +53,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Deprecated: use `items` field directly.
/// Return contents as a slice. Only valid while the list
/// doesn't change size.
- pub fn span(self: var) @TypeOf(self.items) {
+ pub fn span(self: anytype) @TypeOf(self.items) {
return self.items;
}
@@ -162,19 +162,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
mem.copy(T, self.items[oldlen..], items);
}
- /// Same as `append` except it returns the number of bytes written, which is always the same
- /// as `m.len`. The purpose of this function existing is to match `std.io.OutStream` API.
- /// This function may be called only when `T` is `u8`.
- fn appendWrite(self: *Self, m: []const u8) !usize {
- try self.appendSlice(m);
- return m.len;
- }
+ pub usingnamespace if (T != u8) struct {} else struct {
+ pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);
- /// Initializes an OutStream which will append to the list.
- /// This function may be called only when `T` is `u8`.
- pub fn outStream(self: *Self) std.io.OutStream(*Self, error{OutOfMemory}, appendWrite) {
- return .{ .context = self };
- }
+ /// Initializes a Writer which will append to the list.
+ pub fn writer(self: *Self) Writer {
+ return .{ .context = self };
+ }
+
+ /// Deprecated: use `writer`
+ pub const outStream = writer;
+
+ /// Same as `append` except it returns the number of bytes written, which is always the same
+ /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
+ fn appendWrite(self: *Self, m: []const u8) !usize {
+ try self.appendSlice(m);
+ return m.len;
+ }
+ };
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
@@ -205,6 +210,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
self.capacity = new_len;
}
+ /// Reduce length to `new_len`.
+ /// Invalidates element pointers.
+ /// Keeps capacity the same.
+ pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+ assert(new_len <= self.items.len);
+ self.items.len = new_len;
+ }
+
pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -214,7 +227,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (better_capacity >= new_capacity) break;
}
- const new_memory = try self.allocator.realloc(self.allocatedSlice(), better_capacity);
+ const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
@@ -244,6 +257,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return &self.items[self.items.len - 1];
}
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is an array pointing to the newly allocated elements.
+ pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]T {
+ const prev_len = self.items.len;
+ try self.resize(self.items.len + n);
+ return self.items[prev_len..][0..n];
+ }
+
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is an array pointing to the newly allocated elements.
+ /// Asserts that there is already space for the new item without allocating more.
+ pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
+ assert(self.items.len + n <= self.capacity);
+ const prev_len = self.items.len;
+ self.items.len += n;
+ return self.items[prev_len..][0..n];
+ }
+
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
pub fn pop(self: *Self) T {
@@ -427,6 +458,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
self.capacity = new_len;
}
+ /// Reduce length to `new_len`.
+ /// Invalidates element pointers.
+ /// Keeps capacity the same.
+ pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+ assert(new_len <= self.items.len);
+ self.items.len = new_len;
+ }
+
pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -436,7 +475,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
if (better_capacity >= new_capacity) break;
}
- const new_memory = try allocator.realloc(self.allocatedSlice(), better_capacity);
+ const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
@@ -467,6 +506,24 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return &self.items[self.items.len - 1];
}
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is an array pointing to the newly allocated elements.
+ pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+ const prev_len = self.items.len;
+ try self.resize(allocator, self.items.len + n);
+ return self.items[prev_len..][0..n];
+ }
+
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is an array pointing to the newly allocated elements.
+ /// Asserts that there is already space for the new item without allocating more.
+ pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
+ assert(self.items.len + n <= self.capacity);
+ const prev_len = self.items.len;
+ self.items.len += n;
+ return self.items[prev_len..][0..n];
+ }
+
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// This operation does not invalidate any element pointers.
@@ -694,3 +751,39 @@ test "std.ArrayList.shrink still sets length on error.OutOfMemory" {
list.shrink(1);
testing.expect(list.items.len == 1);
}
+
+test "std.ArrayList.writer" {
+ var list = ArrayList(u8).init(std.testing.allocator);
+ defer list.deinit();
+
+ const writer = list.writer();
+ try writer.writeAll("a");
+ try writer.writeAll("bc");
+ try writer.writeAll("d");
+ try writer.writeAll("efg");
+ testing.expectEqualSlices(u8, list.items, "abcdefg");
+}
+
+test "addManyAsArray" {
+ const a = std.testing.allocator;
+ {
+ var list = ArrayList(u8).init(a);
+ defer list.deinit();
+
+ (try list.addManyAsArray(4)).* = "aoeu".*;
+ try list.ensureCapacity(8);
+ list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
+
+ testing.expectEqualSlices(u8, list.items, "aoeuasdf");
+ }
+ {
+ var list = ArrayListUnmanaged(u8){};
+ defer list.deinit(a);
+
+ (try list.addManyAsArray(a, 4)).* = "aoeu".*;
+ try list.ensureCapacity(a, 8);
+ list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
+
+ testing.expectEqualSlices(u8, list.items, "aoeuasdf");
+ }
+}
diff --git a/lib/std/array_list_sentineled.zig b/lib/std/array_list_sentineled.zig
index b83cc4ad62..828be7462f 100644
--- a/lib/std/array_list_sentineled.zig
+++ b/lib/std/array_list_sentineled.zig
@@ -69,7 +69,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type {
}
/// Only works when `T` is `u8`.
- pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: var) !Self {
+ pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: anytype) !Self {
const size = std.math.cast(usize, std.fmt.count(format, args)) catch |err| switch (err) {
error.Overflow => return error.OutOfMemory,
};
@@ -82,7 +82,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type {
self.list.deinit();
}
- pub fn span(self: var) @TypeOf(self.list.items[0..:sentinel]) {
+ pub fn span(self: anytype) @TypeOf(self.list.items[0..:sentinel]) {
return self.list.items[0..self.len() :sentinel];
}
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index d6d0b70754..880af37ef4 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -123,10 +123,10 @@ pub fn Queue(comptime T: type) type {
/// Dumps the contents of the queue to `stream`.
/// Up to 4 elements from the head are dumped and the tail of the queue is
/// dumped as well.
- pub fn dumpToStream(self: *Self, stream: var) !void {
+ pub fn dumpToStream(self: *Self, stream: anytype) !void {
const S = struct {
fn dumpRecursive(
- s: var,
+ s: anytype,
optional_node: ?*Node,
indent: usize,
comptime depth: comptime_int,
diff --git a/lib/std/buf_map.zig b/lib/std/buf_map.zig
index e8bc735b57..5cf4a54ed3 100644
--- a/lib/std/buf_map.zig
+++ b/lib/std/buf_map.zig
@@ -33,10 +33,10 @@ pub const BufMap = struct {
pub fn setMove(self: *BufMap, key: []u8, value: []u8) !void {
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
- self.free(get_or_put.kv.key);
- get_or_put.kv.key = key;
+ self.free(get_or_put.entry.key);
+ get_or_put.entry.key = key;
}
- get_or_put.kv.value = value;
+ get_or_put.entry.value = value;
}
/// `key` and `value` are copied into the BufMap.
@@ -45,19 +45,18 @@ pub const BufMap = struct {
errdefer self.free(value_copy);
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
- self.free(get_or_put.kv.value);
+ self.free(get_or_put.entry.value);
} else {
- get_or_put.kv.key = self.copy(key) catch |err| {
+ get_or_put.entry.key = self.copy(key) catch |err| {
_ = self.hash_map.remove(key);
return err;
};
}
- get_or_put.kv.value = value_copy;
+ get_or_put.entry.value = value_copy;
}
pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
- const entry = self.hash_map.get(key) orelse return null;
- return entry.value;
+ return self.hash_map.get(key);
}
pub fn delete(self: *BufMap, key: []const u8) void {
@@ -79,7 +78,7 @@ pub const BufMap = struct {
}
fn copy(self: BufMap, value: []const u8) ![]u8 {
- return mem.dupe(self.hash_map.allocator, u8, value);
+ return self.hash_map.allocator.dupe(u8, value);
}
};
diff --git a/lib/std/buf_set.zig b/lib/std/buf_set.zig
index 89df0478ff..d8a0264bd7 100644
--- a/lib/std/buf_set.zig
+++ b/lib/std/buf_set.zig
@@ -14,14 +14,12 @@ pub const BufSet = struct {
return self;
}
- pub fn deinit(self: *const BufSet) void {
- var it = self.hash_map.iterator();
- while (true) {
- const entry = it.next() orelse break;
+ pub fn deinit(self: *BufSet) void {
+ for (self.hash_map.items()) |entry| {
self.free(entry.key);
}
-
self.hash_map.deinit();
+ self.* = undefined;
}
pub fn put(self: *BufSet, key: []const u8) !void {
diff --git a/lib/std/build.zig b/lib/std/build.zig
index d98ef71a59..19de76b00d 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -286,7 +286,7 @@ pub const Builder = struct {
}
pub fn dupe(self: *Builder, bytes: []const u8) []u8 {
- return mem.dupe(self.allocator, u8, bytes) catch unreachable;
+ return self.allocator.dupe(u8, bytes) catch unreachable;
}
pub fn dupePath(self: *Builder, bytes: []const u8) []u8 {
@@ -312,7 +312,7 @@ pub const Builder = struct {
return write_file_step;
}
- pub fn addLog(self: *Builder, comptime format: []const u8, args: var) *LogStep {
+ pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep {
const data = self.fmt(format, args);
const log_step = self.allocator.create(LogStep) catch unreachable;
log_step.* = LogStep.init(self, data);
@@ -422,12 +422,12 @@ pub const Builder = struct {
.type_id = type_id,
.description = description,
};
- if ((self.available_options_map.put(name, available_option) catch unreachable) != null) {
+ if ((self.available_options_map.fetchPut(name, available_option) catch unreachable) != null) {
panic("Option '{}' declared twice", .{name});
}
self.available_options_list.append(available_option) catch unreachable;
- const entry = self.user_input_options.get(name) orelse return null;
+ const entry = self.user_input_options.getEntry(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -512,7 +512,7 @@ pub const Builder = struct {
if (self.release_mode != null) {
@panic("setPreferredReleaseMode must be called before standardReleaseOptions and may not be called twice");
}
- const description = self.fmt("create a release build ({})", .{@tagName(mode)});
+ const description = self.fmt("Create a release build ({})", .{@tagName(mode)});
self.is_release = self.option(bool, "release", description) orelse false;
self.release_mode = if (self.is_release) mode else builtin.Mode.Debug;
}
@@ -522,9 +522,9 @@ pub const Builder = struct {
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
- const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
- const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
- const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
+ const release_safe = self.option(bool, "release-safe", "Optimizations on and safety on") orelse false;
+ const release_fast = self.option(bool, "release-fast", "Optimizations on and safety off") orelse false;
+ const release_small = self.option(bool, "release-small", "Size optimizations on and safety off") orelse false;
const mode = if (release_safe and !release_fast and !release_small)
builtin.Mode.ReleaseSafe
@@ -555,7 +555,7 @@ pub const Builder = struct {
const triple = self.option(
[]const u8,
"target",
- "The CPU architecture, OS, and ABI to build for.",
+ "The CPU architecture, OS, and ABI to build for",
) orelse return args.default_target;
// TODO add cpu and features as part of the target triple
@@ -634,7 +634,7 @@ pub const Builder = struct {
pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
- gop.kv.value = UserInputOption{
+ gop.entry.value = UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
.used = false,
@@ -643,7 +643,7 @@ pub const Builder = struct {
}
// option already exists
- switch (gop.kv.value.value) {
+ switch (gop.entry.value.value) {
UserValue.Scalar => |s| {
// turn it into a list
var list = ArrayList([]const u8).init(self.allocator);
@@ -675,7 +675,7 @@ pub const Builder = struct {
pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
- gop.kv.value = UserInputOption{
+ gop.entry.value = UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
.used = false,
@@ -684,7 +684,7 @@ pub const Builder = struct {
}
// option already exists
- switch (gop.kv.value.value) {
+ switch (gop.entry.value.value) {
UserValue.Scalar => |s| {
warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", .{ name, name, s });
return true;
@@ -883,7 +883,7 @@ pub const Builder = struct {
return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable;
}
- pub fn fmt(self: *Builder, comptime format: []const u8, args: var) []u8 {
+ pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 {
return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
}
@@ -1905,10 +1905,11 @@ pub const LibExeObjStep = struct {
builder.allocator,
&[_][]const u8{ builder.cache_root, builder.fmt("{}_build_options.zig", .{self.name}) },
);
- try fs.cwd().writeFile(build_options_file, self.build_options_contents.span());
+ const path_from_root = builder.pathFromRoot(build_options_file);
+ try fs.cwd().writeFile(path_from_root, self.build_options_contents.span());
try zig_args.append("--pkg-begin");
try zig_args.append("build_options");
- try zig_args.append(builder.pathFromRoot(build_options_file));
+ try zig_args.append(path_from_root);
try zig_args.append("--pkg-end");
}
@@ -2558,3 +2559,10 @@ pub const InstalledFile = struct {
dir: InstallDir,
path: []const u8,
};
+
+test "" {
+ // The only purpose of this test is to get all these untested functions
+ // to be referenced to avoid regression so it is okay to skip some targets.
+ if (comptime std.Target.current.cpu.arch.ptrBitWidth() == 64)
+ std.meta.refAllDecls(@This());
+}
diff --git a/lib/std/build/emit_raw.zig b/lib/std/build/emit_raw.zig
index 8fd27d6cfc..058a4a64ff 100644
--- a/lib/std/build/emit_raw.zig
+++ b/lib/std/build/emit_raw.zig
@@ -126,7 +126,7 @@ const BinaryElfOutput = struct {
return segment.p_offset <= section.elfOffset and (segment.p_offset + segment.p_filesz) >= (section.elfOffset + section.fileSize);
}
- fn sectionValidForOutput(shdr: var) bool {
+ fn sectionValidForOutput(shdr: anytype) bool {
return shdr.sh_size > 0 and shdr.sh_type != elf.SHT_NOBITS and
((shdr.sh_flags & elf.SHF_ALLOC) == elf.SHF_ALLOC);
}
@@ -215,3 +215,7 @@ pub const InstallRawStep = struct {
try emitRaw(builder.allocator, full_src_path, full_dest_path);
}
};
+
+test "" {
+ std.meta.refAllDecls(InstallRawStep);
+}
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index af8033ae91..499011eab9 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -131,6 +131,15 @@ pub const CallingConvention = enum {
AAPCSVFP,
};
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const SourceLocation = struct {
+ file: [:0]const u8,
+ fn_name: [:0]const u8,
+ line: u32,
+ column: u32,
+};
+
pub const TypeId = @TagType(TypeInfo);
/// This data structure is used by the Zig language code generation and
@@ -157,7 +166,7 @@ pub const TypeInfo = union(enum) {
Fn: Fn,
BoundFn: Fn,
Opaque: void,
- Frame: void,
+ Frame: Frame,
AnyFrame: AnyFrame,
Vector: Vector,
EnumLiteral: void,
@@ -189,7 +198,7 @@ pub const TypeInfo = union(enum) {
/// The type of the sentinel is the element type of the pointer, which is
/// the value of the `child` field in this struct. However there is no way
/// to refer to that type here, so we use `var`.
- sentinel: var,
+ sentinel: anytype,
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
@@ -211,7 +220,7 @@ pub const TypeInfo = union(enum) {
/// The type of the sentinel is the element type of the array, which is
/// the value of the `child` field in this struct. However there is no way
/// to refer to that type here, so we use `var`.
- sentinel: var,
+ sentinel: anytype,
};
/// This data structure is used by the Zig language code generation and
@@ -228,15 +237,16 @@ pub const TypeInfo = union(enum) {
name: []const u8,
offset: ?comptime_int,
field_type: type,
- default_value: var,
+ default_value: anytype,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Struct = struct {
layout: ContainerLayout,
- fields: []StructField,
- decls: []Declaration,
+ fields: []const StructField,
+ decls: []const Declaration,
+ is_tuple: bool,
};
/// This data structure is used by the Zig language code generation and
@@ -256,12 +266,13 @@ pub const TypeInfo = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const Error = struct {
name: []const u8,
+ /// This field is ignored when using @Type().
value: comptime_int,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
- pub const ErrorSet = ?[]Error;
+ pub const ErrorSet = ?[]const Error;
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
@@ -275,8 +286,8 @@ pub const TypeInfo = union(enum) {
pub const Enum = struct {
layout: ContainerLayout,
tag_type: type,
- fields: []EnumField,
- decls: []Declaration,
+ fields: []const EnumField,
+ decls: []const Declaration,
is_exhaustive: bool,
};
@@ -293,8 +304,8 @@ pub const TypeInfo = union(enum) {
pub const Union = struct {
layout: ContainerLayout,
tag_type: ?type,
- fields: []UnionField,
- decls: []Declaration,
+ fields: []const UnionField,
+ decls: []const Declaration,
};
/// This data structure is used by the Zig language code generation and
@@ -312,7 +323,13 @@ pub const TypeInfo = union(enum) {
is_generic: bool,
is_var_args: bool,
return_type: ?type,
- args: []FnArg,
+ args: []const FnArg,
+ };
+
+ /// This data structure is used by the Zig language code generation and
+ /// therefore must be kept in sync with the compiler implementation.
+ pub const Frame = struct {
+ function: anytype,
};
/// This data structure is used by the Zig language code generation and
@@ -352,7 +369,7 @@ pub const TypeInfo = union(enum) {
is_export: bool,
lib_name: ?[]const u8,
return_type: type,
- arg_names: [][]const u8,
+ arg_names: []const []const u8,
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
@@ -436,7 +453,7 @@ pub const Version = struct {
self: Version,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
if (fmt.len == 0) {
if (self.patch == 0) {
diff --git a/lib/std/c.zig b/lib/std/c.zig
index fe9fc7ac40..e483b5b50f 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -27,7 +27,7 @@ pub usingnamespace switch (std.Target.current.os.tag) {
else => struct {},
};
-pub fn getErrno(rc: var) u16 {
+pub fn getErrno(rc: anytype) u16 {
if (rc == -1) {
return @intCast(u16, _errno().*);
} else {
@@ -73,7 +73,6 @@ pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: fd_t) c_int;
pub extern "c" fn close(fd: fd_t) c_int;
-pub extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *Stat, flags: u32) c_int;
pub extern "c" fn lseek(fd: fd_t, offset: off_t, whence: c_int) off_t;
pub extern "c" fn open(path: [*:0]const u8, oflag: c_uint, ...) c_int;
pub extern "c" fn openat(fd: c_int, path: [*:0]const u8, oflag: c_uint, ...) c_int;
@@ -102,6 +101,7 @@ pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
pub extern "c" fn mkdir(path: [*:0]const u8, mode: c_uint) c_int;
pub extern "c" fn mkdirat(dirfd: fd_t, path: [*:0]const u8, mode: u32) c_int;
pub extern "c" fn symlink(existing: [*:0]const u8, new: [*:0]const u8) c_int;
+pub extern "c" fn symlinkat(oldpath: [*:0]const u8, newdirfd: fd_t, newpath: [*:0]const u8) c_int;
pub extern "c" fn rename(old: [*:0]const u8, new: [*:0]const u8) c_int;
pub extern "c" fn renameat(olddirfd: fd_t, old: [*:0]const u8, newdirfd: fd_t, new: [*:0]const u8) c_int;
pub extern "c" fn chdir(path: [*:0]const u8) c_int;
@@ -115,9 +115,11 @@ pub extern "c" fn readlinkat(dirfd: fd_t, noalias path: [*:0]const u8, noalias b
pub usingnamespace switch (builtin.os.tag) {
.macosx, .ios, .watchos, .tvos => struct {
pub const realpath = @"realpath$DARWIN_EXTSN";
+ pub const fstatat = @"fstatat$INODE64";
},
else => struct {
pub extern "c" fn realpath(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
+ pub extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *Stat, flags: u32) c_int;
},
};
@@ -231,6 +233,17 @@ pub extern "c" fn setuid(uid: c_uint) c_int;
pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
pub extern "c" fn malloc(usize) ?*c_void;
+
+pub usingnamespace switch (builtin.os.tag) {
+ .linux, .freebsd, .kfreebsd, .netbsd, .openbsd => struct {
+ pub extern "c" fn malloc_usable_size(?*const c_void) usize;
+ },
+ .macosx, .ios, .watchos, .tvos => struct {
+ pub extern "c" fn malloc_size(?*const c_void) usize;
+ },
+ else => struct {},
+};
+
pub extern "c" fn realloc(?*c_void, usize) ?*c_void;
pub extern "c" fn free(*c_void) void;
pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
diff --git a/lib/std/c/ast.zig b/lib/std/c/ast.zig
index bb8c01f138..467050d57d 100644
--- a/lib/std/c/ast.zig
+++ b/lib/std/c/ast.zig
@@ -64,7 +64,7 @@ pub const Error = union(enum) {
NothingDeclared: SimpleError("declaration doesn't declare anything"),
QualifierIgnored: SingleTokenError("qualifier '{}' ignored"),
- pub fn render(self: *const Error, tree: *Tree, stream: var) !void {
+ pub fn render(self: *const Error, tree: *Tree, stream: anytype) !void {
switch (self.*) {
.InvalidToken => |*x| return x.render(tree, stream),
.ExpectedToken => |*x| return x.render(tree, stream),
@@ -114,7 +114,7 @@ pub const Error = union(enum) {
token: TokenIndex,
expected_id: @TagType(Token.Id),
- pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
const found_token = tree.tokens.at(self.token);
if (found_token.id == .Invalid) {
return stream.print("expected '{}', found invalid bytes", .{self.expected_id.symbol()});
@@ -129,7 +129,7 @@ pub const Error = union(enum) {
token: TokenIndex,
type_spec: *Node.TypeSpec,
- pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
try stream.write("invalid type specifier '");
try type_spec.spec.print(tree, stream);
const token_name = tree.tokens.at(self.token).id.symbol();
@@ -141,7 +141,7 @@ pub const Error = union(enum) {
kw: TokenIndex,
name: TokenIndex,
- pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
return stream.print("must use '{}' tag to refer to type '{}'", .{ tree.slice(kw), tree.slice(name) });
}
};
@@ -150,7 +150,7 @@ pub const Error = union(enum) {
return struct {
token: TokenIndex,
- pub fn render(self: *const @This(), tree: *Tree, stream: var) !void {
+ pub fn render(self: *const @This(), tree: *Tree, stream: anytype) !void {
const actual_token = tree.tokens.at(self.token);
return stream.print(msg, .{actual_token.id.symbol()});
}
@@ -163,7 +163,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: anytype) !void {
return stream.write(msg);
}
};
@@ -317,7 +317,7 @@ pub const Node = struct {
sym_type: *Type,
},
- pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: var) !void {
+ pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: anytype) !void {
switch (self.spec) {
.None => unreachable,
.Void => |index| try stream.write(tree.slice(index)),
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index f827eb6863..2426638569 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -16,6 +16,7 @@ pub extern "c" fn @"realpath$DARWIN_EXTSN"(noalias file_name: [*:0]const u8, noa
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) isize;
pub extern "c" fn @"fstat$INODE64"(fd: fd_t, buf: *Stat) c_int;
+pub extern "c" fn @"fstatat$INODE64"(dirfd: fd_t, path_name: [*:0]const u8, buf: *Stat, flags: u32) c_int;
pub extern "c" fn mach_absolute_time() u64;
pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
diff --git a/lib/std/c/tokenizer.zig b/lib/std/c/tokenizer.zig
index 198c69e2a7..f3fc8b8107 100644
--- a/lib/std/c/tokenizer.zig
+++ b/lib/std/c/tokenizer.zig
@@ -278,62 +278,62 @@ pub const Token = struct {
// TODO extensions
pub const keywords = std.ComptimeStringMap(Id, .{
- .{"auto", .Keyword_auto},
- .{"break", .Keyword_break},
- .{"case", .Keyword_case},
- .{"char", .Keyword_char},
- .{"const", .Keyword_const},
- .{"continue", .Keyword_continue},
- .{"default", .Keyword_default},
- .{"do", .Keyword_do},
- .{"double", .Keyword_double},
- .{"else", .Keyword_else},
- .{"enum", .Keyword_enum},
- .{"extern", .Keyword_extern},
- .{"float", .Keyword_float},
- .{"for", .Keyword_for},
- .{"goto", .Keyword_goto},
- .{"if", .Keyword_if},
- .{"int", .Keyword_int},
- .{"long", .Keyword_long},
- .{"register", .Keyword_register},
- .{"return", .Keyword_return},
- .{"short", .Keyword_short},
- .{"signed", .Keyword_signed},
- .{"sizeof", .Keyword_sizeof},
- .{"static", .Keyword_static},
- .{"struct", .Keyword_struct},
- .{"switch", .Keyword_switch},
- .{"typedef", .Keyword_typedef},
- .{"union", .Keyword_union},
- .{"unsigned", .Keyword_unsigned},
- .{"void", .Keyword_void},
- .{"volatile", .Keyword_volatile},
- .{"while", .Keyword_while},
+ .{ "auto", .Keyword_auto },
+ .{ "break", .Keyword_break },
+ .{ "case", .Keyword_case },
+ .{ "char", .Keyword_char },
+ .{ "const", .Keyword_const },
+ .{ "continue", .Keyword_continue },
+ .{ "default", .Keyword_default },
+ .{ "do", .Keyword_do },
+ .{ "double", .Keyword_double },
+ .{ "else", .Keyword_else },
+ .{ "enum", .Keyword_enum },
+ .{ "extern", .Keyword_extern },
+ .{ "float", .Keyword_float },
+ .{ "for", .Keyword_for },
+ .{ "goto", .Keyword_goto },
+ .{ "if", .Keyword_if },
+ .{ "int", .Keyword_int },
+ .{ "long", .Keyword_long },
+ .{ "register", .Keyword_register },
+ .{ "return", .Keyword_return },
+ .{ "short", .Keyword_short },
+ .{ "signed", .Keyword_signed },
+ .{ "sizeof", .Keyword_sizeof },
+ .{ "static", .Keyword_static },
+ .{ "struct", .Keyword_struct },
+ .{ "switch", .Keyword_switch },
+ .{ "typedef", .Keyword_typedef },
+ .{ "union", .Keyword_union },
+ .{ "unsigned", .Keyword_unsigned },
+ .{ "void", .Keyword_void },
+ .{ "volatile", .Keyword_volatile },
+ .{ "while", .Keyword_while },
// ISO C99
- .{"_Bool", .Keyword_bool},
- .{"_Complex", .Keyword_complex},
- .{"_Imaginary", .Keyword_imaginary},
- .{"inline", .Keyword_inline},
- .{"restrict", .Keyword_restrict},
+ .{ "_Bool", .Keyword_bool },
+ .{ "_Complex", .Keyword_complex },
+ .{ "_Imaginary", .Keyword_imaginary },
+ .{ "inline", .Keyword_inline },
+ .{ "restrict", .Keyword_restrict },
// ISO C11
- .{"_Alignas", .Keyword_alignas},
- .{"_Alignof", .Keyword_alignof},
- .{"_Atomic", .Keyword_atomic},
- .{"_Generic", .Keyword_generic},
- .{"_Noreturn", .Keyword_noreturn},
- .{"_Static_assert", .Keyword_static_assert},
- .{"_Thread_local", .Keyword_thread_local},
+ .{ "_Alignas", .Keyword_alignas },
+ .{ "_Alignof", .Keyword_alignof },
+ .{ "_Atomic", .Keyword_atomic },
+ .{ "_Generic", .Keyword_generic },
+ .{ "_Noreturn", .Keyword_noreturn },
+ .{ "_Static_assert", .Keyword_static_assert },
+ .{ "_Thread_local", .Keyword_thread_local },
// Preprocessor directives
- .{"include", .Keyword_include},
- .{"define", .Keyword_define},
- .{"ifdef", .Keyword_ifdef},
- .{"ifndef", .Keyword_ifndef},
- .{"error", .Keyword_error},
- .{"pragma", .Keyword_pragma},
+ .{ "include", .Keyword_include },
+ .{ "define", .Keyword_define },
+ .{ "ifdef", .Keyword_ifdef },
+ .{ "ifndef", .Keyword_ifndef },
+ .{ "error", .Keyword_error },
+ .{ "pragma", .Keyword_pragma },
});
// TODO do this in the preprocessor
diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig
index d160c4ebb2..acaa5edc8d 100644
--- a/lib/std/cache_hash.zig
+++ b/lib/std/cache_hash.zig
@@ -70,7 +70,7 @@ pub const CacheHash = struct {
/// Convert the input value into bytes and record it as a dependency of the
/// process being cached
- pub fn add(self: *CacheHash, val: var) void {
+ pub fn add(self: *CacheHash, val: anytype) void {
assert(self.manifest_file == null);
const valPtr = switch (@typeInfo(@TypeOf(val))) {
@@ -207,7 +207,7 @@ pub const CacheHash = struct {
}
if (cache_hash_file.path == null) {
- cache_hash_file.path = try mem.dupe(self.allocator, u8, file_path);
+ cache_hash_file.path = try self.allocator.dupe(u8, file_path);
}
const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig
index 3021f6bc1e..8cc5cac130 100644
--- a/lib/std/comptime_string_map.zig
+++ b/lib/std/comptime_string_map.zig
@@ -8,7 +8,7 @@ const mem = std.mem;
/// `kvs` expects a list literal containing list literals or an array/slice of structs
/// where `.@"0"` is the `[]const u8` key and `.@"1"` is the associated value of type `V`.
/// TODO: https://github.com/ziglang/zig/issues/4335
-pub fn ComptimeStringMap(comptime V: type, comptime kvs: var) type {
+pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type {
const precomputed = comptime blk: {
@setEvalBranchQuota(2000);
const KV = struct {
@@ -126,7 +126,7 @@ test "ComptimeStringMap slice of structs" {
testMap(map);
}
-fn testMap(comptime map: var) void {
+fn testMap(comptime map: anytype) void {
std.testing.expectEqual(TestEnum.A, map.get("have").?);
std.testing.expectEqual(TestEnum.B, map.get("nothing").?);
std.testing.expect(null == map.get("missing"));
@@ -165,7 +165,7 @@ test "ComptimeStringMap void value type, list literal of list literals" {
testSet(map);
}
-fn testSet(comptime map: var) void {
+fn testSet(comptime map: anytype) void {
std.testing.expectEqual({}, map.get("have").?);
std.testing.expectEqual({}, map.get("nothing").?);
std.testing.expect(null == map.get("missing"));
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 8f961f80f2..f0f40bd231 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -29,7 +29,7 @@ const hashes = [_]Crypto{
Crypto{ .ty = crypto.Blake3, .name = "blake3" },
};
-pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 {
+pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 {
var h = Hash.init();
var block: [Hash.digest_length]u8 = undefined;
@@ -56,7 +56,7 @@ const macs = [_]Crypto{
Crypto{ .ty = crypto.HmacSha256, .name = "hmac-sha256" },
};
-pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {
+pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 {
std.debug.assert(32 >= Mac.mac_length and 32 >= Mac.minimum_key_length);
var in: [1 * MiB]u8 = undefined;
@@ -81,7 +81,7 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {
const exchanges = [_]Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }};
-pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count: comptime_int) !u64 {
+pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_count: comptime_int) !u64 {
std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length);
var in: [DhKeyExchange.minimum_key_length]u8 = undefined;
@@ -123,15 +123,6 @@ fn mode(comptime x: comptime_int) comptime_int {
return if (builtin.mode == .Debug) x / 64 else x;
}
-// TODO(#1358): Replace with builtin formatted padding when available.
-fn printPad(stdout: var, s: []const u8) !void {
- var i: usize = 0;
- while (i < 12 - s.len) : (i += 1) {
- try stdout.print(" ", .{});
- }
- try stdout.print("{}", .{s});
-}
-
pub fn main() !void {
const stdout = std.io.getStdOut().outStream();
@@ -175,24 +166,21 @@ pub fn main() !void {
inline for (hashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
const throughput = try benchmarkHash(H.ty, mode(32 * MiB));
- try printPad(stdout, H.name);
- try stdout.print(": {} MiB/s\n", .{throughput / (1 * MiB)});
+ try stdout.print("{:>11}: {:5} MiB/s\n", .{ H.name, throughput / (1 * MiB) });
}
}
inline for (macs) |M| {
if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) {
const throughput = try benchmarkMac(M.ty, mode(128 * MiB));
- try printPad(stdout, M.name);
- try stdout.print(": {} MiB/s\n", .{throughput / (1 * MiB)});
+ try stdout.print("{:>11}: {:5} MiB/s\n", .{ M.name, throughput / (1 * MiB) });
}
}
inline for (exchanges) |E| {
if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
const throughput = try benchmarkKeyExchange(E.ty, mode(1000));
- try printPad(stdout, E.name);
- try stdout.print(": {} exchanges/s\n", .{throughput});
+ try stdout.print("{:>11}: {:5} exchanges/s\n", .{ E.name, throughput });
}
}
}
diff --git a/lib/std/crypto/test.zig b/lib/std/crypto/test.zig
index 1ff326cf39..61260c7e39 100644
--- a/lib/std/crypto/test.zig
+++ b/lib/std/crypto/test.zig
@@ -4,7 +4,7 @@ const mem = std.mem;
const fmt = std.fmt;
// Hash using the specified hasher `H` asserting `expected == H(input)`.
-pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, input: []const u8) void {
+pub fn assertEqualHash(comptime Hasher: anytype, comptime expected: []const u8, input: []const u8) void {
var h: [expected.len / 2]u8 = undefined;
Hasher.hash(input, h[0..]);
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index f339aa639b..3346598ab7 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -50,33 +50,21 @@ pub const LineInfo = struct {
}
};
-/// Tries to write to stderr, unbuffered, and ignores any error returned.
-/// Does not append a newline.
-var stderr_file: File = undefined;
-var stderr_file_writer: File.Writer = undefined;
-
-var stderr_stream: ?*File.OutStream = null;
var stderr_mutex = std.Mutex.init();
-pub fn warn(comptime fmt: []const u8, args: var) void {
+/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
+/// "printf debugging".
+pub const warn = print;
+
+/// Print to stderr, unbuffered, and silently returning on failure. Intended
+/// for use in "printf debugging." Use `std.log` functions for proper logging.
+pub fn print(comptime fmt: []const u8, args: anytype) void {
const held = stderr_mutex.acquire();
defer held.release();
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
nosuspend stderr.print(fmt, args) catch return;
}
-pub fn getStderrStream() *File.OutStream {
- if (stderr_stream) |st| {
- return st;
- } else {
- stderr_file = io.getStdErr();
- stderr_file_writer = stderr_file.outStream();
- const st = &stderr_file_writer;
- stderr_stream = st;
- return st;
- }
-}
-
pub fn getStderrMutex() *std.Mutex {
return &stderr_mutex;
}
@@ -99,6 +87,7 @@ pub fn detectTTYConfig() TTY.Config {
if (process.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| {
return .escape_codes;
} else |_| {
+ const stderr_file = io.getStdErr();
if (stderr_file.supportsAnsiEscapeCodes()) {
return .escape_codes;
} else if (builtin.os.tag == .windows and stderr_file.isTty()) {
@@ -113,7 +102,7 @@ pub fn detectTTYConfig() TTY.Config {
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
nosuspend {
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -134,7 +123,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
/// TODO multithreaded awareness
pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
nosuspend {
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -204,7 +193,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
nosuspend {
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -234,7 +223,7 @@ pub fn assert(ok: bool) void {
if (!ok) unreachable; // assertion failure
}
-pub fn panic(comptime format: []const u8, args: var) noreturn {
+pub fn panic(comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
// TODO: remove conditional once wasi / LLVM defines __builtin_return_address
const first_trace_addr = if (builtin.os.tag == .wasi) null else @returnAddress();
@@ -252,7 +241,7 @@ var panic_mutex = std.Mutex.init();
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;
-pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: var) noreturn {
+pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
if (enable_segfault_handler) {
@@ -272,7 +261,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
const held = panic_mutex.acquire();
defer held.release();
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
stderr.print(format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
@@ -297,7 +286,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
// A panic happened while trying to print a previous panic message,
// we're still holding the mutex but that's fine as we're going to
// call abort()
- const stderr = getStderrStream();
+ const stderr = io.getStdErr().writer();
stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort();
},
else => {
@@ -317,7 +306,7 @@ const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: builtin.StackTrace,
- out_stream: var,
+ out_stream: anytype,
allocator: *mem.Allocator,
debug_info: *DebugInfo,
tty_config: TTY.Config,
@@ -395,7 +384,7 @@ pub const StackIterator = struct {
};
pub fn writeCurrentStackTrace(
- out_stream: var,
+ out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
@@ -410,7 +399,7 @@ pub fn writeCurrentStackTrace(
}
pub fn writeCurrentStackTraceWindows(
- out_stream: var,
+ out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
@@ -446,7 +435,7 @@ pub const TTY = struct {
// TODO give this a payload of file handle
windows_api,
- fn setColor(conf: Config, out_stream: var, color: Color) void {
+ fn setColor(conf: Config, out_stream: anytype, color: Color) void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => switch (color) {
@@ -458,6 +447,7 @@ pub const TTY = struct {
.Reset => out_stream.writeAll(RESET) catch return,
},
.windows_api => if (builtin.os.tag == .windows) {
+ const stderr_file = io.getStdErr();
const S = struct {
var attrs: windows.WORD = undefined;
var init_attrs = false;
@@ -565,7 +555,7 @@ fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const Mach
}
/// TODO resources https://github.com/ziglang/zig/issues/4353
-pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_config: TTY.Config) !void {
+pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return printLineInfo(
@@ -596,13 +586,13 @@ pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: us
}
fn printLineInfo(
- out_stream: var,
+ out_stream: anytype,
line_info: ?LineInfo,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
tty_config: TTY.Config,
- comptime printLineFromFile: var,
+ comptime printLineFromFile: anytype,
) !void {
nosuspend {
tty_config.setColor(out_stream, .White);
@@ -830,7 +820,7 @@ fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInf
}
}
-fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
+fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]usize {
const num_words = try stream.readIntLittle(u32);
var word_i: usize = 0;
var list = ArrayList(usize).init(allocator);
@@ -1014,7 +1004,7 @@ fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugI
};
}
-fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void {
+fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking });
@@ -1142,7 +1132,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + segment_cmd.vmsize;
if (rebased_address >= seg_start and rebased_address < seg_end) {
- if (self.address_map.getValue(base_address)) |obj_di| {
+ if (self.address_map.get(base_address)) |obj_di| {
return obj_di;
}
@@ -1214,7 +1204,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + info.SizeOfImage;
if (address >= seg_start and address < seg_end) {
- if (self.address_map.getValue(seg_start)) |obj_di| {
+ if (self.address_map.get(seg_start)) |obj_di| {
return obj_di;
}
@@ -1288,7 +1278,7 @@ pub const DebugInfo = struct {
else => return error.MissingDebugInfo,
}
- if (self.address_map.getValue(ctx.base_address)) |obj_di| {
+ if (self.address_map.get(ctx.base_address)) |obj_di| {
return obj_di;
}
@@ -1451,7 +1441,7 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) {
const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]);
// Check if its debug infos are already in the cache
- var o_file_di = self.ofiles.getValue(o_file_path) orelse
+ var o_file_di = self.ofiles.get(o_file_path) orelse
(self.loadOFile(o_file_path) catch |err| switch (err) {
error.FileNotFound,
error.MissingDebugInfo,
diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig
index ac278c4b1f..8149554246 100644
--- a/lib/std/debug/leb128.zig
+++ b/lib/std/debug/leb128.zig
@@ -1,171 +1,211 @@
const std = @import("std");
const testing = std.testing;
-pub fn readULEB128(comptime T: type, in_stream: var) !T {
- const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
+/// Read a single unsigned LEB128 value from the given reader as type T,
+/// or error.Overflow if the value cannot fit.
+pub fn readULEB128(comptime T: type, reader: anytype) !T {
+ const U = if (T.bit_count < 8) u8 else T;
+ const ShiftT = std.math.Log2Int(U);
- var result: T = 0;
- var shift: usize = 0;
+ const max_group = (U.bit_count + 6) / 7;
- while (true) {
- const byte = try in_stream.readByte();
-
- if (shift > T.bit_count)
- return error.Overflow;
-
- var operand: T = undefined;
- if (@shlWithOverflow(T, byte & 0x7f, @intCast(ShiftT, shift), &operand))
- return error.Overflow;
+ var value = @as(U, 0);
+ var group = @as(ShiftT, 0);
- result |= operand;
+ while (group < max_group) : (group += 1) {
+ const byte = try reader.readByte();
+ var temp = @as(U, byte & 0x7f);
- if ((byte & 0x80) == 0)
- return result;
+ if (@shlWithOverflow(U, temp, group * 7, &temp)) return error.Overflow;
- shift += 7;
+ value |= temp;
+ if (byte & 0x80 == 0) break;
+ } else {
+ return error.Overflow;
}
-}
-
-pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
- const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
- var result: T = 0;
- var shift: usize = 0;
- var i: usize = 0;
-
- while (true) : (i += 1) {
- const byte = ptr.*[i];
-
- if (shift > T.bit_count)
- return error.Overflow;
+ // only applies in the case that we extended to u8
+ if (U != T) {
+ if (value > std.math.maxInt(T)) return error.Overflow;
+ }
- var operand: T = undefined;
- if (@shlWithOverflow(T, byte & 0x7f, @intCast(ShiftT, shift), &operand))
- return error.Overflow;
+ return @truncate(T, value);
+}
- result |= operand;
+/// Write a single unsigned integer as unsigned LEB128 to the given writer.
+pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
+ const T = @TypeOf(uint_value);
+ const U = if (T.bit_count < 8) u8 else T;
+ var value = @intCast(U, uint_value);
- if ((byte & 0x80) == 0) {
- ptr.* += i + 1;
- return result;
+ while (true) {
+ const byte = @truncate(u8, value & 0x7f);
+ value >>= 7;
+ if (value == 0) {
+ try writer.writeByte(byte);
+ break;
+ } else {
+ try writer.writeByte(byte | 0x80);
}
-
- shift += 7;
}
}
-pub fn readILEB128(comptime T: type, in_stream: var) !T {
- const UT = std.meta.Int(false, T.bit_count);
- const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
+/// Read a single unsigned LEB128 integer from the given memory as type T.
+/// The provided slice reference will be updated to point to the byte after the last byte read.
+pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
+ var buf = std.io.fixedBufferStream(ptr.*);
+ const value = try readULEB128(T, buf.reader());
+ ptr.*.ptr += buf.pos;
+ return value;
+}
- var result: UT = 0;
- var shift: usize = 0;
+/// Write a single unsigned integer to the given memory as unsigned LEB128,
+/// returning the number of bytes written.
+pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
+ const T = @TypeOf(uint_value);
+ const max_group = (T.bit_count + 6) / 7;
+ var buf = std.io.fixedBufferStream(ptr);
+ try writeULEB128(buf.writer(), uint_value);
+ return buf.pos;
+}
- while (true) {
- const byte: u8 = try in_stream.readByte();
+/// Read a single signed LEB128 value from the given reader as type T,
+/// or error.Overflow if the value cannot fit.
+pub fn readILEB128(comptime T: type, reader: anytype) !T {
+ const S = if (T.bit_count < 8) i8 else T;
+ const U = std.meta.Int(false, S.bit_count);
+ const ShiftU = std.math.Log2Int(U);
- if (shift > T.bit_count)
- return error.Overflow;
+ const max_group = (U.bit_count + 6) / 7;
- var operand: UT = undefined;
- if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
- if (byte != 0x7f)
- return error.Overflow;
- }
+ var value = @as(U, 0);
+ var group = @as(ShiftU, 0);
- result |= operand;
+ while (group < max_group) : (group += 1) {
+ const byte = try reader.readByte();
+ var temp = @as(U, byte & 0x7f);
- shift += 7;
+ const shift = group * 7;
+ if (@shlWithOverflow(U, temp, shift, &temp)) {
+ // Overflow is ok so long as the sign bit is set and this is the last byte
+ if (byte & 0x80 != 0) return error.Overflow;
+ if (@bitCast(S, temp) >= 0) return error.Overflow;
- if ((byte & 0x80) == 0) {
- if (shift < T.bit_count and (byte & 0x40) != 0) {
- result |= @bitCast(UT, @intCast(T, -1)) << @intCast(ShiftT, shift);
+ // and all the overflowed bits are 1
+ const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
+ const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+ if (remaining_bits != -1) return error.Overflow;
+ }
+
+ value |= temp;
+ if (byte & 0x80 == 0) {
+ const needs_sign_ext = group + 1 < max_group;
+ if (byte & 0x40 != 0 and needs_sign_ext) {
+ const ones = @as(S, -1);
+ value |= @bitCast(U, ones) << (shift + 7);
}
- return @bitCast(T, result);
+ break;
}
+ } else {
+ return error.Overflow;
}
-}
-pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
- const UT = std.meta.Int(false, T.bit_count);
- const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
+ const result = @bitCast(S, value);
+ // Only applies if we extended to i8
+ if (S != T) {
+ if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow;
+ }
- var result: UT = 0;
- var shift: usize = 0;
- var i: usize = 0;
+ return @truncate(T, result);
+}
- while (true) : (i += 1) {
- const byte = ptr.*[i];
+/// Write a single signed integer as signed LEB128 to the given writer.
+pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
+ const T = @TypeOf(int_value);
+ const S = if (T.bit_count < 8) i8 else T;
+ const U = std.meta.Int(false, S.bit_count);
- if (shift > T.bit_count)
- return error.Overflow;
+ var value = @intCast(S, int_value);
- var operand: UT = undefined;
- if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
- if (byte != 0x7f)
- return error.Overflow;
+ while (true) {
+ const uvalue = @bitCast(U, value);
+ const byte = @truncate(u8, uvalue);
+ value >>= 6;
+ if (value == -1 or value == 0) {
+ try writer.writeByte(byte & 0x7F);
+ break;
+ } else {
+ value >>= 1;
+ try writer.writeByte(byte | 0x80);
}
+ }
+}
- result |= operand;
-
- shift += 7;
+/// Read a single signed LEB128 integer from the given memory as type T.
+/// The provided slice reference will be updated to point to the byte after the last byte read.
+pub fn readILEB128Mem(comptime T: type, ptr: *[]const u8) !T {
+ var buf = std.io.fixedBufferStream(ptr.*);
+ const value = try readILEB128(T, buf.reader());
+ ptr.*.ptr += buf.pos;
+ return value;
+}
- if ((byte & 0x80) == 0) {
- if (shift < T.bit_count and (byte & 0x40) != 0) {
- result |= @bitCast(UT, @intCast(T, -1)) << @intCast(ShiftT, shift);
- }
- ptr.* += i + 1;
- return @bitCast(T, result);
- }
- }
+/// Write a single signed integer to the given memory as signed LEB128,
+/// returning the number of bytes written.
+pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
+ const T = @TypeOf(int_value);
+ var buf = std.io.fixedBufferStream(ptr);
+ try writeILEB128(buf.writer(), int_value);
+ return buf.pos;
}
+// tests
fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
- var in_stream = std.io.fixedBufferStream(encoded);
- return try readILEB128(T, in_stream.inStream());
+ var reader = std.io.fixedBufferStream(encoded);
+ return try readILEB128(T, reader.reader());
}
fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
- var in_stream = std.io.fixedBufferStream(encoded);
- return try readULEB128(T, in_stream.inStream());
+ var reader = std.io.fixedBufferStream(encoded);
+ return try readULEB128(T, reader.reader());
}
fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
- var in_stream = std.io.fixedBufferStream(encoded);
- const v1 = readILEB128(T, in_stream.inStream());
- var in_ptr = encoded.ptr;
- const v2 = readILEB128Mem(T, &in_ptr);
+ var reader = std.io.fixedBufferStream(encoded);
+ const v1 = try readILEB128(T, reader.reader());
+ var in_ptr = encoded;
+ const v2 = try readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
return v1;
}
fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
- var in_stream = std.io.fixedBufferStream(encoded);
- const v1 = readULEB128(T, in_stream.inStream());
- var in_ptr = encoded.ptr;
- const v2 = readULEB128Mem(T, &in_ptr);
+ var reader = std.io.fixedBufferStream(encoded);
+ const v1 = try readULEB128(T, reader.reader());
+ var in_ptr = encoded;
+ const v2 = try readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
return v1;
}
-fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) void {
- var in_stream = std.io.fixedBufferStream(encoded);
- var in_ptr = encoded.ptr;
+fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
+ var reader = std.io.fixedBufferStream(encoded);
+ var in_ptr = encoded;
var i: usize = 0;
while (i < N) : (i += 1) {
- const v1 = readILEB128(T, in_stream.inStream());
- const v2 = readILEB128Mem(T, &in_ptr);
+ const v1 = try readILEB128(T, reader.reader());
+ const v2 = try readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
}
-fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) void {
- var in_stream = std.io.fixedBufferStream(encoded);
- var in_ptr = encoded.ptr;
+fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
+ var reader = std.io.fixedBufferStream(encoded);
+ var in_ptr = encoded;
var i: usize = 0;
while (i < N) : (i += 1) {
- const v1 = readULEB128(T, in_stream.inStream());
- const v2 = readULEB128Mem(T, &in_ptr);
+ const v1 = try readULEB128(T, reader.reader());
+ const v2 = try readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
}
@@ -212,7 +252,7 @@ test "deserialize signed LEB128" {
testing.expect((try test_read_ileb128(i64, "\x80\x81\x80\x00")) == 0x80);
// Decode sequence of SLEB128 values
- test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
+ try test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}
test "deserialize unsigned LEB128" {
@@ -252,5 +292,99 @@ test "deserialize unsigned LEB128" {
testing.expect((try test_read_uleb128(u64, "\x80\x81\x80\x00")) == 0x80);
// Decode sequence of ULEB128 values
- test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
+ try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
+}
+
+fn test_write_leb128(value: anytype) !void {
+ const T = @TypeOf(value);
+
+ const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
+ const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
+ const readStream = if (T.is_signed) readILEB128 else readULEB128;
+ const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;
+
+ // decode to a larger bit size too, to ensure sign extension
+ // is working as expected
+ const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
+ const B = std.meta.Int(T.is_signed, larger_type_bits);
+
+ const bytes_needed = bn: {
+ const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
+ if (T.bit_count <= 7) break :bn @as(u16, 1);
+
+ const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
+ const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
+ if (used_bits <= 7) break :bn @as(u16, 1);
+ break :bn ((used_bits + 6) / 7);
+ };
+
+ const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;
+
+ var buf: [max_groups]u8 = undefined;
+ var fbs = std.io.fixedBufferStream(&buf);
+
+ // stream write
+ try writeStream(fbs.writer(), value);
+ const w1_pos = fbs.pos;
+ testing.expect(w1_pos == bytes_needed);
+
+ // stream read
+ fbs.pos = 0;
+ const sr = try readStream(T, fbs.reader());
+ testing.expect(fbs.pos == w1_pos);
+ testing.expect(sr == value);
+
+ // bigger type stream read
+ fbs.pos = 0;
+ const bsr = try readStream(B, fbs.reader());
+ testing.expect(fbs.pos == w1_pos);
+ testing.expect(bsr == value);
+
+ // mem write
+ const w2_pos = try writeMem(&buf, value);
+ testing.expect(w2_pos == w1_pos);
+
+ // mem read
+ var buf_ref: []u8 = buf[0..];
+ const mr = try readMem(T, &buf_ref);
+ testing.expect(@ptrToInt(buf_ref.ptr) - @ptrToInt(&buf) == w2_pos);
+ testing.expect(mr == value);
+
+ // bigger type mem read
+ buf_ref = buf[0..];
+ const bmr = try readMem(T, &buf_ref);
+ testing.expect(@ptrToInt(buf_ref.ptr) - @ptrToInt(&buf) == w2_pos);
+ testing.expect(bmr == value);
+}
+
+test "serialize unsigned LEB128" {
+ const max_bits = 18;
+
+ comptime var t = 0;
+ inline while (t <= max_bits) : (t += 1) {
+ const T = std.meta.Int(false, t);
+ const min = std.math.minInt(T);
+ const max = std.math.maxInt(T);
+ var i = @as(std.meta.Int(false, T.bit_count + 1), min);
+
+ while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+ }
+}
+
+test "serialize signed LEB128" {
+ // explicitly test i0 because starting `t` at 0
+ // will break the while loop
+ try test_write_leb128(@as(i0, 0));
+
+ const max_bits = 18;
+
+ comptime var t = 1;
+ inline while (t <= max_bits) : (t += 1) {
+ const T = std.meta.Int(true, t);
+ const min = std.math.minInt(T);
+ const max = std.math.maxInt(T);
+ var i = @as(std.meta.Int(true, T.bit_count + 1), min);
+
+ while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+ }
}
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index ebb4c096f8..1400442247 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -236,7 +236,7 @@ const LineNumberProgram = struct {
}
};
-fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 {
+fn readUnitLength(in_stream: anytype, endian: builtin.Endian, is_64: *bool) !u64 {
const first_32_bits = try in_stream.readInt(u32, endian);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@@ -249,7 +249,7 @@ fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 {
}
// TODO the nosuspends here are workarounds
-fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
+fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
@@ -257,25 +257,25 @@ fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8
}
// TODO the nosuspends here are workarounds
-fn readAddress(in_stream: var, endian: builtin.Endian, is_64: bool) !u64 {
+fn readAddress(in_stream: anytype, endian: builtin.Endian, is_64: bool) !u64 {
return nosuspend if (is_64)
try in_stream.readInt(u64, endian)
else
@as(u64, try in_stream.readInt(u32, endian));
}
-fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
// TODO the nosuspends here are workarounds
-fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: usize) !FormValue {
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
+fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
return FormValue{
@@ -302,7 +302,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: boo
}
// TODO the nosuspends here are workarounds
-fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: i32) !FormValue {
+fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
return FormValue{
.Ref = switch (size) {
1 => try nosuspend in_stream.readInt(u8, endian),
@@ -316,7 +316,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.
}
// TODO the nosuspends here are workarounds
-fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue {
+fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM_addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
FORM_block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@@ -359,7 +359,7 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endia
const F = @TypeOf(async parseFormValue(allocator, in_stream, child_form_id, endian, is_64));
var frame = try allocator.create(F);
defer allocator.destroy(frame);
- return await @asyncCall(frame, {}, parseFormValue, allocator, in_stream, child_form_id, endian, is_64);
+ return await @asyncCall(frame, {}, parseFormValue, .{ allocator, in_stream, child_form_id, endian, is_64 });
},
else => error.InvalidDebugInfo,
};
@@ -670,7 +670,7 @@ pub const DwarfInfo = struct {
}
}
- fn parseDie(di: *DwarfInfo, in_stream: var, abbrev_table: *const AbbrevTable, is_64: bool) !?Die {
+ fn parseDie(di: *DwarfInfo, in_stream: anytype, abbrev_table: *const AbbrevTable, is_64: bool) !?Die {
const abbrev_code = try leb.readULEB128(u64, in_stream);
if (abbrev_code == 0) return null;
const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index dd22a42304..98508df190 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -517,7 +517,7 @@ pub fn readAllHeaders(allocator: *mem.Allocator, file: File) !AllHeaders {
return hdrs;
}
-pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) {
+pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(@TypeOf(int_64), int_64);
@@ -529,7 +529,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_
}
}
-pub fn int32(need_bswap: bool, int_32: var, comptime Int64: var) Int64 {
+pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
if (need_bswap) {
return @byteSwap(@TypeOf(int_32), int_32);
} else {
@@ -551,6 +551,7 @@ fn preadNoEof(file: std.fs.File, buf: []u8, offset: u64) !void {
error.InputOutput => return error.FileSystem,
error.Unexpected => return error.Unexpected,
error.WouldBlock => return error.Unexpected,
+ error.AccessDenied => return error.Unexpected,
};
if (len == 0) return error.UnexpectedEndOfFile;
i += len;
diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig
index 61130b32cb..0dc6550218 100644
--- a/lib/std/event/group.zig
+++ b/lib/std/event/group.zig
@@ -65,7 +65,7 @@ pub fn Group(comptime ReturnType: type) type {
/// allocated by the group and freed by `wait`.
/// `func` must be async and have return type `ReturnType`.
/// Thread-safe.
- pub fn call(self: *Self, comptime func: var, args: var) error{OutOfMemory}!void {
+ pub fn call(self: *Self, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
var frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args)));
errdefer self.allocator.destroy(frame);
const node = try self.allocator.create(AllocStack.Node);
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 23066a6963..c9ba3b3470 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -64,21 +64,22 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// - `e`: output floating point value in scientific notation
/// - `d`: output numeric value in decimal notation
/// - `b`: output integer value in binary notation
+/// - `o`: output integer value in octal notation
/// - `c`: output integer as an ASCII character. Integer type must have 8 bits at max.
/// - `*`: output the address of the value instead of the value itself.
///
/// If a formatted user type contains a function of the type
/// ```
-/// pub fn format(value: ?, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: var) !void
+/// pub fn format(value: ?, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void
/// ```
/// with `?` being the type formatted, this function will be called instead of the default implementation.
/// This allows user types to be formatted in a logical manner instead of dumping all fields of the type.
///
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
pub fn format(
- out_stream: var,
+ writer: anytype,
comptime fmt: []const u8,
- args: var,
+ args: anytype,
) !void {
const ArgSetType = u32;
if (@typeInfo(@TypeOf(args)) != .Struct) {
@@ -136,7 +137,7 @@ pub fn format(
.Start => switch (c) {
'{' => {
if (start_index < i) {
- try out_stream.writeAll(fmt[start_index..i]);
+ try writer.writeAll(fmt[start_index..i]);
}
start_index = i;
@@ -148,7 +149,7 @@ pub fn format(
},
'}' => {
if (start_index < i) {
- try out_stream.writeAll(fmt[start_index..i]);
+ try writer.writeAll(fmt[start_index..i]);
}
state = .CloseBrace;
},
@@ -183,7 +184,7 @@ pub fn format(
args[arg_to_print],
fmt[0..0],
options,
- out_stream,
+ writer,
default_max_depth,
);
@@ -214,7 +215,7 @@ pub fn format(
args[arg_to_print],
fmt[specifier_start..i],
options,
- out_stream,
+ writer,
default_max_depth,
);
state = .Start;
@@ -259,7 +260,7 @@ pub fn format(
args[arg_to_print],
fmt[specifier_start..specifier_end],
options,
- out_stream,
+ writer,
default_max_depth,
);
state = .Start;
@@ -285,7 +286,7 @@ pub fn format(
args[arg_to_print],
fmt[specifier_start..specifier_end],
options,
- out_stream,
+ writer,
default_max_depth,
);
state = .Start;
@@ -306,148 +307,149 @@ pub fn format(
}
}
if (start_index < fmt.len) {
- try out_stream.writeAll(fmt[start_index..]);
+ try writer.writeAll(fmt[start_index..]);
}
}
pub fn formatType(
- value: var,
+ value: anytype,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
max_depth: usize,
-) @TypeOf(out_stream).Error!void {
+) @TypeOf(writer).Error!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
- try out_stream.writeAll(@typeName(@TypeOf(value).Child));
- try out_stream.writeAll("@");
- try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, out_stream);
+ try writer.writeAll(@typeName(@TypeOf(value).Child));
+ try writer.writeAll("@");
+ try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer);
return;
}
const T = @TypeOf(value);
if (comptime std.meta.trait.hasFn("format")(T)) {
- return try value.format(fmt, options, out_stream);
+ return try value.format(fmt, options, writer);
}
switch (@typeInfo(T)) {
.ComptimeInt, .Int, .ComptimeFloat, .Float => {
- return formatValue(value, fmt, options, out_stream);
+ return formatValue(value, fmt, options, writer);
},
.Void => {
- return formatBuf("void", options, out_stream);
+ return formatBuf("void", options, writer);
},
.Bool => {
- return formatBuf(if (value) "true" else "false", options, out_stream);
+ return formatBuf(if (value) "true" else "false", options, writer);
},
.Optional => {
if (value) |payload| {
- return formatType(payload, fmt, options, out_stream, max_depth);
+ return formatType(payload, fmt, options, writer, max_depth);
} else {
- return formatBuf("null", options, out_stream);
+ return formatBuf("null", options, writer);
}
},
.ErrorUnion => {
if (value) |payload| {
- return formatType(payload, fmt, options, out_stream, max_depth);
+ return formatType(payload, fmt, options, writer, max_depth);
} else |err| {
- return formatType(err, fmt, options, out_stream, max_depth);
+ return formatType(err, fmt, options, writer, max_depth);
}
},
.ErrorSet => {
- try out_stream.writeAll("error.");
- return out_stream.writeAll(@errorName(value));
+ try writer.writeAll("error.");
+ return writer.writeAll(@errorName(value));
},
.Enum => |enumInfo| {
- try out_stream.writeAll(@typeName(T));
+ try writer.writeAll(@typeName(T));
if (enumInfo.is_exhaustive) {
- try out_stream.writeAll(".");
- try out_stream.writeAll(@tagName(value));
+ try writer.writeAll(".");
+ try writer.writeAll(@tagName(value));
return;
}
// Use @tagName only if value is one of known fields
+ @setEvalBranchQuota(3 * enumInfo.fields.len);
inline for (enumInfo.fields) |enumField| {
if (@enumToInt(value) == enumField.value) {
- try out_stream.writeAll(".");
- try out_stream.writeAll(@tagName(value));
+ try writer.writeAll(".");
+ try writer.writeAll(@tagName(value));
return;
}
}
- try out_stream.writeAll("(");
- try formatType(@enumToInt(value), fmt, options, out_stream, max_depth);
- try out_stream.writeAll(")");
+ try writer.writeAll("(");
+ try formatType(@enumToInt(value), fmt, options, writer, max_depth);
+ try writer.writeAll(")");
},
.Union => {
- try out_stream.writeAll(@typeName(T));
+ try writer.writeAll(@typeName(T));
if (max_depth == 0) {
- return out_stream.writeAll("{ ... }");
+ return writer.writeAll("{ ... }");
}
const info = @typeInfo(T).Union;
if (info.tag_type) |UnionTagType| {
- try out_stream.writeAll("{ .");
- try out_stream.writeAll(@tagName(@as(UnionTagType, value)));
- try out_stream.writeAll(" = ");
+ try writer.writeAll("{ .");
+ try writer.writeAll(@tagName(@as(UnionTagType, value)));
+ try writer.writeAll(" = ");
inline for (info.fields) |u_field| {
if (@enumToInt(@as(UnionTagType, value)) == u_field.enum_field.?.value) {
- try formatType(@field(value, u_field.name), fmt, options, out_stream, max_depth - 1);
+ try formatType(@field(value, u_field.name), fmt, options, writer, max_depth - 1);
}
}
- try out_stream.writeAll(" }");
+ try writer.writeAll(" }");
} else {
- try format(out_stream, "@{x}", .{@ptrToInt(&value)});
+ try format(writer, "@{x}", .{@ptrToInt(&value)});
}
},
.Struct => |StructT| {
- try out_stream.writeAll(@typeName(T));
+ try writer.writeAll(@typeName(T));
if (max_depth == 0) {
- return out_stream.writeAll("{ ... }");
+ return writer.writeAll("{ ... }");
}
- try out_stream.writeAll("{");
+ try writer.writeAll("{");
inline for (StructT.fields) |f, i| {
if (i == 0) {
- try out_stream.writeAll(" .");
+ try writer.writeAll(" .");
} else {
- try out_stream.writeAll(", .");
+ try writer.writeAll(", .");
}
- try out_stream.writeAll(f.name);
- try out_stream.writeAll(" = ");
- try formatType(@field(value, f.name), fmt, options, out_stream, max_depth - 1);
+ try writer.writeAll(f.name);
+ try writer.writeAll(" = ");
+ try formatType(@field(value, f.name), fmt, options, writer, max_depth - 1);
}
- try out_stream.writeAll(" }");
+ try writer.writeAll(" }");
},
.Pointer => |ptr_info| switch (ptr_info.size) {
.One => switch (@typeInfo(ptr_info.child)) {
.Array => |info| {
if (info.child == u8) {
- return formatText(value, fmt, options, out_stream);
+ return formatText(value, fmt, options, writer);
}
- return format(out_stream, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
},
.Enum, .Union, .Struct => {
- return formatType(value.*, fmt, options, out_stream, max_depth);
+ return formatType(value.*, fmt, options, writer, max_depth);
},
- else => return format(out_stream, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
+ else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
},
.Many, .C => {
if (ptr_info.sentinel) |sentinel| {
- return formatType(mem.span(value), fmt, options, out_stream, max_depth);
+ return formatType(mem.span(value), fmt, options, writer, max_depth);
}
if (ptr_info.child == u8) {
if (fmt.len > 0 and fmt[0] == 's') {
- return formatText(mem.span(value), fmt, options, out_stream);
+ return formatText(mem.span(value), fmt, options, writer);
}
}
- return format(out_stream, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
},
.Slice => {
if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) {
- return formatText(value, fmt, options, out_stream);
+ return formatText(value, fmt, options, writer);
}
if (ptr_info.child == u8) {
- return formatText(value, fmt, options, out_stream);
+ return formatText(value, fmt, options, writer);
}
- return format(out_stream, "{}@{x}", .{ @typeName(ptr_info.child), @ptrToInt(value.ptr) });
+ return format(writer, "{}@{x}", .{ @typeName(ptr_info.child), @ptrToInt(value.ptr) });
},
},
.Array => |info| {
@@ -462,58 +464,58 @@ pub fn formatType(
.sentinel = null,
},
});
- return formatType(@as(Slice, &value), fmt, options, out_stream, max_depth);
+ return formatType(@as(Slice, &value), fmt, options, writer, max_depth);
},
.Vector => {
const len = @typeInfo(T).Vector.len;
- try out_stream.writeAll("{ ");
+ try writer.writeAll("{ ");
var i: usize = 0;
while (i < len) : (i += 1) {
- try formatValue(value[i], fmt, options, out_stream);
+ try formatValue(value[i], fmt, options, writer);
if (i < len - 1) {
- try out_stream.writeAll(", ");
+ try writer.writeAll(", ");
}
}
- try out_stream.writeAll(" }");
+ try writer.writeAll(" }");
},
.Fn => {
- return format(out_stream, "{}@{x}", .{ @typeName(T), @ptrToInt(value) });
+ return format(writer, "{}@{x}", .{ @typeName(T), @ptrToInt(value) });
},
- .Type => return out_stream.writeAll(@typeName(T)),
+ .Type => return writer.writeAll(@typeName(T)),
.EnumLiteral => {
const buffer = [_]u8{'.'} ++ @tagName(value);
- return formatType(buffer, fmt, options, out_stream, max_depth);
+ return formatType(buffer, fmt, options, writer, max_depth);
},
else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"),
}
}
fn formatValue(
- value: var,
+ value: anytype,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
if (comptime std.mem.eql(u8, fmt, "B")) {
- return formatBytes(value, options, 1000, out_stream);
+ return formatBytes(value, options, 1000, writer);
} else if (comptime std.mem.eql(u8, fmt, "Bi")) {
- return formatBytes(value, options, 1024, out_stream);
+ return formatBytes(value, options, 1024, writer);
}
const T = @TypeOf(value);
switch (@typeInfo(T)) {
- .Float, .ComptimeFloat => return formatFloatValue(value, fmt, options, out_stream),
- .Int, .ComptimeInt => return formatIntValue(value, fmt, options, out_stream),
- .Bool => return formatBuf(if (value) "true" else "false", options, out_stream),
+ .Float, .ComptimeFloat => return formatFloatValue(value, fmt, options, writer),
+ .Int, .ComptimeInt => return formatIntValue(value, fmt, options, writer),
+ .Bool => return formatBuf(if (value) "true" else "false", options, writer),
else => comptime unreachable,
}
}
pub fn formatIntValue(
- value: var,
+ value: anytype,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
comptime var radix = 10;
comptime var uppercase = false;
@@ -529,7 +531,7 @@ pub fn formatIntValue(
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
if (@TypeOf(int_value).bit_count <= 8) {
- return formatAsciiChar(@as(u8, int_value), options, out_stream);
+ return formatAsciiChar(@as(u8, int_value), options, writer);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
}
@@ -542,23 +544,26 @@ pub fn formatIntValue(
} else if (comptime std.mem.eql(u8, fmt, "X")) {
radix = 16;
uppercase = true;
+ } else if (comptime std.mem.eql(u8, fmt, "o")) {
+ radix = 8;
+ uppercase = false;
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
- return formatInt(int_value, radix, uppercase, options, out_stream);
+ return formatInt(int_value, radix, uppercase, options, writer);
}
fn formatFloatValue(
- value: var,
+ value: anytype,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) {
- return formatFloatScientific(value, options, out_stream);
+ return formatFloatScientific(value, options, writer);
} else if (comptime std.mem.eql(u8, fmt, "d")) {
- return formatFloatDecimal(value, options, out_stream);
+ return formatFloatDecimal(value, options, writer);
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
@@ -568,13 +573,13 @@ pub fn formatText(
bytes: []const u8,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
if (comptime std.mem.eql(u8, fmt, "s") or (fmt.len == 0)) {
- return formatBuf(bytes, options, out_stream);
+ return formatBuf(bytes, options, writer);
} else if (comptime (std.mem.eql(u8, fmt, "x") or std.mem.eql(u8, fmt, "X"))) {
for (bytes) |c| {
- try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, out_stream);
+ try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
return;
} else {
@@ -585,38 +590,38 @@ pub fn formatText(
pub fn formatAsciiChar(
c: u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
- return out_stream.writeAll(@as(*const [1]u8, &c));
+ return writer.writeAll(@as(*const [1]u8, &c));
}
pub fn formatBuf(
buf: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
const width = options.width orelse buf.len;
var padding = if (width > buf.len) (width - buf.len) else 0;
const pad_byte = [1]u8{options.fill};
switch (options.alignment) {
.Left => {
- try out_stream.writeAll(buf);
+ try writer.writeAll(buf);
while (padding > 0) : (padding -= 1) {
- try out_stream.writeAll(&pad_byte);
+ try writer.writeAll(&pad_byte);
}
},
.Center => {
const padl = padding / 2;
var i: usize = 0;
- while (i < padl) : (i += 1) try out_stream.writeAll(&pad_byte);
- try out_stream.writeAll(buf);
- while (i < padding) : (i += 1) try out_stream.writeAll(&pad_byte);
+ while (i < padl) : (i += 1) try writer.writeAll(&pad_byte);
+ try writer.writeAll(buf);
+ while (i < padding) : (i += 1) try writer.writeAll(&pad_byte);
},
.Right => {
while (padding > 0) : (padding -= 1) {
- try out_stream.writeAll(&pad_byte);
+ try writer.writeAll(&pad_byte);
}
- try out_stream.writeAll(buf);
+ try writer.writeAll(buf);
},
}
}
@@ -625,40 +630,40 @@ pub fn formatBuf(
// It should be the case that every full precision, printed value can be re-parsed back to the
// same type unambiguously.
pub fn formatFloatScientific(
- value: var,
+ value: anytype,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
var x = @floatCast(f64, value);
// Errol doesn't handle these special cases.
if (math.signbit(x)) {
- try out_stream.writeAll("-");
+ try writer.writeAll("-");
x = -x;
}
if (math.isNan(x)) {
- return out_stream.writeAll("nan");
+ return writer.writeAll("nan");
}
if (math.isPositiveInf(x)) {
- return out_stream.writeAll("inf");
+ return writer.writeAll("inf");
}
if (x == 0.0) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
if (options.precision) |precision| {
if (precision != 0) {
- try out_stream.writeAll(".");
+ try writer.writeAll(".");
var i: usize = 0;
while (i < precision) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
}
} else {
- try out_stream.writeAll(".0");
+ try writer.writeAll(".0");
}
- try out_stream.writeAll("e+00");
+ try writer.writeAll("e+00");
return;
}
@@ -668,86 +673,86 @@ pub fn formatFloatScientific(
if (options.precision) |precision| {
errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Scientific);
- try out_stream.writeAll(float_decimal.digits[0..1]);
+ try writer.writeAll(float_decimal.digits[0..1]);
// {e0} case prints no `.`
if (precision != 0) {
- try out_stream.writeAll(".");
+ try writer.writeAll(".");
var printed: usize = 0;
if (float_decimal.digits.len > 1) {
const num_digits = math.min(float_decimal.digits.len, precision + 1);
- try out_stream.writeAll(float_decimal.digits[1..num_digits]);
+ try writer.writeAll(float_decimal.digits[1..num_digits]);
printed += num_digits - 1;
}
while (printed < precision) : (printed += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
}
} else {
- try out_stream.writeAll(float_decimal.digits[0..1]);
- try out_stream.writeAll(".");
+ try writer.writeAll(float_decimal.digits[0..1]);
+ try writer.writeAll(".");
if (float_decimal.digits.len > 1) {
const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
- try out_stream.writeAll(float_decimal.digits[1..num_digits]);
+ try writer.writeAll(float_decimal.digits[1..num_digits]);
} else {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
}
- try out_stream.writeAll("e");
+ try writer.writeAll("e");
const exp = float_decimal.exp - 1;
if (exp >= 0) {
- try out_stream.writeAll("+");
+ try writer.writeAll("+");
if (exp > -10 and exp < 10) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
- try formatInt(exp, 10, false, FormatOptions{ .width = 0 }, out_stream);
+ try formatInt(exp, 10, false, FormatOptions{ .width = 0 }, writer);
} else {
- try out_stream.writeAll("-");
+ try writer.writeAll("-");
if (exp > -10 and exp < 10) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
- try formatInt(-exp, 10, false, FormatOptions{ .width = 0 }, out_stream);
+ try formatInt(-exp, 10, false, FormatOptions{ .width = 0 }, writer);
}
}
// Print a float of the format x.yyyyy where the number of y is specified by the precision argument.
// By default floats are printed at full precision (no rounding).
pub fn formatFloatDecimal(
- value: var,
+ value: anytype,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
var x = @as(f64, value);
// Errol doesn't handle these special cases.
if (math.signbit(x)) {
- try out_stream.writeAll("-");
+ try writer.writeAll("-");
x = -x;
}
if (math.isNan(x)) {
- return out_stream.writeAll("nan");
+ return writer.writeAll("nan");
}
if (math.isPositiveInf(x)) {
- return out_stream.writeAll("inf");
+ return writer.writeAll("inf");
}
if (x == 0.0) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
if (options.precision) |precision| {
if (precision != 0) {
- try out_stream.writeAll(".");
+ try writer.writeAll(".");
var i: usize = 0;
while (i < precision) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
} else {
- try out_stream.writeAll(".0");
+ try writer.writeAll(".0");
}
}
@@ -769,14 +774,14 @@ pub fn formatFloatDecimal(
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
- try out_stream.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]);
+ try writer.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]);
var i = num_digits_whole_no_pad;
while (i < num_digits_whole) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
} else {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
// {.0} special case doesn't want a trailing '.'
@@ -784,7 +789,7 @@ pub fn formatFloatDecimal(
return;
}
- try out_stream.writeAll(".");
+ try writer.writeAll(".");
// Keep track of fractional count printed for case where we pre-pad then post-pad with 0's.
var printed: usize = 0;
@@ -796,7 +801,7 @@ pub fn formatFloatDecimal(
var i: usize = 0;
while (i < zeros_to_print) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
printed += 1;
}
@@ -808,14 +813,14 @@ pub fn formatFloatDecimal(
// Remaining fractional portion, zero-padding if insufficient.
assert(precision >= printed);
if (num_digits_whole_no_pad + precision - printed < float_decimal.digits.len) {
- try out_stream.writeAll(float_decimal.digits[num_digits_whole_no_pad .. num_digits_whole_no_pad + precision - printed]);
+ try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad .. num_digits_whole_no_pad + precision - printed]);
return;
} else {
- try out_stream.writeAll(float_decimal.digits[num_digits_whole_no_pad..]);
+ try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad..]);
printed += float_decimal.digits.len - num_digits_whole_no_pad;
while (printed < precision) : (printed += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
}
} else {
@@ -827,14 +832,14 @@ pub fn formatFloatDecimal(
if (num_digits_whole > 0) {
// We may have to zero pad, for instance 1e4 requires zero padding.
- try out_stream.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]);
+ try writer.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]);
var i = num_digits_whole_no_pad;
while (i < num_digits_whole) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
} else {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
// Omit `.` if no fractional portion
@@ -842,7 +847,7 @@ pub fn formatFloatDecimal(
return;
}
- try out_stream.writeAll(".");
+ try writer.writeAll(".");
// Zero-fill until we reach significant digits or run out of precision.
if (float_decimal.exp < 0) {
@@ -850,22 +855,22 @@ pub fn formatFloatDecimal(
var i: usize = 0;
while (i < zero_digit_count) : (i += 1) {
- try out_stream.writeAll("0");
+ try writer.writeAll("0");
}
}
- try out_stream.writeAll(float_decimal.digits[num_digits_whole_no_pad..]);
+ try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad..]);
}
}
pub fn formatBytes(
- value: var,
+ value: anytype,
options: FormatOptions,
comptime radix: usize,
- out_stream: var,
+ writer: anytype,
) !void {
if (value == 0) {
- return out_stream.writeAll("0B");
+ return writer.writeAll("0B");
}
const is_float = comptime std.meta.trait.is(.Float)(@TypeOf(value));
@@ -885,10 +890,10 @@ pub fn formatBytes(
else => unreachable,
};
- try formatFloatDecimal(new_value, options, out_stream);
+ try formatFloatDecimal(new_value, options, writer);
if (suffix == ' ') {
- return out_stream.writeAll("B");
+ return writer.writeAll("B");
}
const buf = switch (radix) {
@@ -896,15 +901,15 @@ pub fn formatBytes(
1024 => &[_]u8{ suffix, 'i', 'B' },
else => unreachable,
};
- return out_stream.writeAll(buf);
+ return writer.writeAll(buf);
}
pub fn formatInt(
- value: var,
+ value: anytype,
base: u8,
uppercase: bool,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
const int_value = if (@TypeOf(value) == comptime_int) blk: {
const Int = math.IntFittingRange(value, value);
@@ -913,18 +918,18 @@ pub fn formatInt(
value;
if (@TypeOf(int_value).is_signed) {
- return formatIntSigned(int_value, base, uppercase, options, out_stream);
+ return formatIntSigned(int_value, base, uppercase, options, writer);
} else {
- return formatIntUnsigned(int_value, base, uppercase, options, out_stream);
+ return formatIntUnsigned(int_value, base, uppercase, options, writer);
}
}
fn formatIntSigned(
- value: var,
+ value: anytype,
base: u8,
uppercase: bool,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
const new_options = FormatOptions{
.width = if (options.width) |w| (if (w == 0) 0 else w - 1) else null,
@@ -934,24 +939,24 @@ fn formatIntSigned(
const bit_count = @typeInfo(@TypeOf(value)).Int.bits;
const Uint = std.meta.Int(false, bit_count);
if (value < 0) {
- try out_stream.writeAll("-");
+ try writer.writeAll("-");
const new_value = math.absCast(value);
- return formatIntUnsigned(new_value, base, uppercase, new_options, out_stream);
+ return formatIntUnsigned(new_value, base, uppercase, new_options, writer);
} else if (options.width == null or options.width.? == 0) {
- return formatIntUnsigned(@intCast(Uint, value), base, uppercase, options, out_stream);
+ return formatIntUnsigned(@intCast(Uint, value), base, uppercase, options, writer);
} else {
- try out_stream.writeAll("+");
+ try writer.writeAll("+");
const new_value = @intCast(Uint, value);
- return formatIntUnsigned(new_value, base, uppercase, new_options, out_stream);
+ return formatIntUnsigned(new_value, base, uppercase, new_options, writer);
}
}
fn formatIntUnsigned(
- value: var,
+ value: anytype,
base: u8,
uppercase: bool,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
assert(base >= 2);
var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
@@ -976,68 +981,96 @@ fn formatIntUnsigned(
const zero_byte: u8 = options.fill;
var leftover_padding = padding - index;
while (true) {
- try out_stream.writeAll(@as(*const [1]u8, &zero_byte)[0..]);
+ try writer.writeAll(@as(*const [1]u8, &zero_byte)[0..]);
leftover_padding -= 1;
if (leftover_padding == 0) break;
}
mem.set(u8, buf[0..index], options.fill);
- return out_stream.writeAll(&buf);
+ return writer.writeAll(&buf);
} else {
const padded_buf = buf[index - padding ..];
mem.set(u8, padded_buf[0..padding], options.fill);
- return out_stream.writeAll(padded_buf);
+ return writer.writeAll(padded_buf);
}
}
-pub fn formatIntBuf(out_buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) usize {
+pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) usize {
var fbs = std.io.fixedBufferStream(out_buf);
- formatInt(value, base, uppercase, options, fbs.outStream()) catch unreachable;
+ formatInt(value, base, uppercase, options, fbs.writer()) catch unreachable;
return fbs.pos;
}
-pub fn parseInt(comptime T: type, buf: []const u8, radix: u8) !T {
- if (!T.is_signed) return parseUnsigned(T, buf, radix);
- if (buf.len == 0) return @as(T, 0);
- if (buf[0] == '-') {
- return math.negate(try parseUnsigned(T, buf[1..], radix));
- } else if (buf[0] == '+') {
- return parseUnsigned(T, buf[1..], radix);
- } else {
- return parseUnsigned(T, buf, radix);
- }
-}
-
-test "parseInt" {
- std.testing.expect((parseInt(i32, "-10", 10) catch unreachable) == -10);
- std.testing.expect((parseInt(i32, "+10", 10) catch unreachable) == 10);
- std.testing.expect(if (parseInt(i32, " 10", 10)) |_| false else |err| err == error.InvalidCharacter);
- std.testing.expect(if (parseInt(i32, "10 ", 10)) |_| false else |err| err == error.InvalidCharacter);
- std.testing.expect(if (parseInt(u32, "-10", 10)) |_| false else |err| err == error.InvalidCharacter);
- std.testing.expect((parseInt(u8, "255", 10) catch unreachable) == 255);
- std.testing.expect(if (parseInt(u8, "256", 10)) |_| false else |err| err == error.Overflow);
-}
-
-pub const ParseUnsignedError = error{
+pub const ParseIntError = error{
/// The result cannot fit in the type specified
Overflow,
- /// The input had a byte that was not a digit
+ /// The input was empty or had a byte that was not a digit
InvalidCharacter,
};
-pub fn parseUnsigned(comptime T: type, buf: []const u8, radix: u8) ParseUnsignedError!T {
+pub fn parseInt(comptime T: type, buf: []const u8, radix: u8) ParseIntError!T {
+ if (buf.len == 0) return error.InvalidCharacter;
+ if (buf[0] == '+') return parseWithSign(T, buf[1..], radix, .Pos);
+ if (buf[0] == '-') return parseWithSign(T, buf[1..], radix, .Neg);
+ return parseWithSign(T, buf, radix, .Pos);
+}
+
+test "parseInt" {
+ std.testing.expect((try parseInt(i32, "-10", 10)) == -10);
+ std.testing.expect((try parseInt(i32, "+10", 10)) == 10);
+ std.testing.expect((try parseInt(u32, "+10", 10)) == 10);
+ std.testing.expectError(error.Overflow, parseInt(u32, "-10", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(u32, " 10", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(u32, "10 ", 10));
+ std.testing.expect((try parseInt(u8, "255", 10)) == 255);
+ std.testing.expectError(error.Overflow, parseInt(u8, "256", 10));
+
+ // +0 and -0 should work for unsigned
+ std.testing.expect((try parseInt(u8, "-0", 10)) == 0);
+ std.testing.expect((try parseInt(u8, "+0", 10)) == 0);
+
+ // ensure minInt is parsed correctly
+ std.testing.expect((try parseInt(i8, "-128", 10)) == math.minInt(i8));
+ std.testing.expect((try parseInt(i43, "-4398046511104", 10)) == math.minInt(i43));
+
+ // empty string or bare +- is invalid
+ std.testing.expectError(error.InvalidCharacter, parseInt(u32, "", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(i32, "", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(u32, "+", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(i32, "+", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(u32, "-", 10));
+ std.testing.expectError(error.InvalidCharacter, parseInt(i32, "-", 10));
+}
+
+fn parseWithSign(
+ comptime T: type,
+ buf: []const u8,
+ radix: u8,
+ comptime sign: enum { Pos, Neg },
+) ParseIntError!T {
+ if (buf.len == 0) return error.InvalidCharacter;
+
+ const add = switch (sign) {
+ .Pos => math.add,
+ .Neg => math.sub,
+ };
+
var x: T = 0;
for (buf) |c| {
const digit = try charToDigit(c, radix);
if (x != 0) x = try math.mul(T, x, try math.cast(T, radix));
- x = try math.add(T, x, try math.cast(T, digit));
+ x = try add(T, x, try math.cast(T, digit));
}
return x;
}
+pub fn parseUnsigned(comptime T: type, buf: []const u8, radix: u8) ParseIntError!T {
+ return parseWithSign(T, buf, radix, .Pos);
+}
+
test "parseUnsigned" {
std.testing.expect((try parseUnsigned(u16, "050124", 10)) == 50124);
std.testing.expect((try parseUnsigned(u16, "65535", 10)) == 65535);
@@ -1063,6 +1096,13 @@ test "parseUnsigned" {
std.testing.expect((try parseUnsigned(u1, "001", 16)) == 1);
std.testing.expect((try parseUnsigned(u2, "3", 16)) == 3);
std.testing.expectError(error.Overflow, parseUnsigned(u2, "4", 16));
+
+ // parseUnsigned does not expect a sign
+ std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "+0", 10));
+ std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "-0", 10));
+
+ // test empty string error
+ std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "", 10));
}
pub const parseFloat = @import("fmt/parse_float.zig").parseFloat;
@@ -1096,22 +1136,22 @@ pub const BufPrintError = error{
/// As much as possible was written to the buffer, but it was too small to fit all the printed bytes.
NoSpaceLeft,
};
-pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: var) BufPrintError![]u8 {
+pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![]u8 {
var fbs = std.io.fixedBufferStream(buf);
- try format(fbs.outStream(), fmt, args);
+ try format(fbs.writer(), fmt, args);
return fbs.getWritten();
}
// Count the characters needed for format. Useful for preallocating memory
-pub fn count(comptime fmt: []const u8, args: var) u64 {
- var counting_stream = std.io.countingOutStream(std.io.null_out_stream);
- format(counting_stream.outStream(), fmt, args) catch |err| switch (err) {};
- return counting_stream.bytes_written;
+pub fn count(comptime fmt: []const u8, args: anytype) u64 {
+ var counting_writer = std.io.countingWriter(std.io.null_writer);
+ format(counting_writer.writer(), fmt, args) catch |err| switch (err) {};
+ return counting_writer.bytes_written;
}
pub const AllocPrintError = error{OutOfMemory};
-pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![]u8 {
+pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
// Output too long. Can't possibly allocate enough memory to display it.
error.Overflow => return error.OutOfMemory,
@@ -1122,7 +1162,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var
};
}
-pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![:0]u8 {
+pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
const result = try allocPrint(allocator, fmt ++ "\x00", args);
return result[0 .. result.len - 1 :0];
}
@@ -1148,7 +1188,7 @@ test "bufPrintInt" {
std.testing.expectEqualSlices(u8, "-42", bufPrintIntToSlice(buf, @as(i32, -42), 10, false, FormatOptions{ .width = 3 }));
}
-fn bufPrintIntToSlice(buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) []u8 {
+fn bufPrintIntToSlice(buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) []u8 {
return buf[0..formatIntBuf(buf, value, base, uppercase, options)];
}
@@ -1204,6 +1244,10 @@ test "int.specifier" {
const value: u8 = 0b1100;
try testFmt("u8: 0b1100\n", "u8: 0b{b}\n", .{value});
}
+ {
+ const value: u16 = 0o1234;
+ try testFmt("u16: 0o1234\n", "u16: 0o{o}\n", .{value});
+ }
}
test "int.padded" {
@@ -1215,15 +1259,15 @@ test "buffer" {
{
var buf1: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf1);
- try formatType(1234, "", FormatOptions{}, fbs.outStream(), default_max_depth);
+ try formatType(1234, "", FormatOptions{}, fbs.writer(), default_max_depth);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234"));
fbs.reset();
- try formatType('a', "c", FormatOptions{}, fbs.outStream(), default_max_depth);
+ try formatType('a', "c", FormatOptions{}, fbs.writer(), default_max_depth);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "a"));
fbs.reset();
- try formatType(0b1100, "b", FormatOptions{}, fbs.outStream(), default_max_depth);
+ try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), default_max_depth);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100"));
}
}
@@ -1321,6 +1365,9 @@ test "enum" {
try testFmt("enum: Enum.Two\n", "enum: {}\n", .{&value});
try testFmt("enum: Enum.One\n", "enum: {x}\n", .{Enum.One});
try testFmt("enum: Enum.Two\n", "enum: {X}\n", .{Enum.Two});
+
+ // test very large enum to verify ct branch quota is large enough
+ try testFmt("enum: Win32Error.INVALID_FUNCTION\n", "enum: {}\n", .{std.os.windows.Win32Error.INVALID_FUNCTION});
}
test "non-exhaustive enum" {
@@ -1413,12 +1460,12 @@ test "custom" {
self: SelfType,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) {
- return std.fmt.format(out_stream, "({d:.3},{d:.3})", .{ self.x, self.y });
+ return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
} else if (comptime std.mem.eql(u8, fmt, "d")) {
- return std.fmt.format(out_stream, "{d:.3}x{d:.3}", .{ self.x, self.y });
+ return std.fmt.format(writer, "{d:.3}x{d:.3}", .{ self.x, self.y });
} else {
@compileError("Unknown format character: '" ++ fmt ++ "'");
}
@@ -1534,7 +1581,7 @@ test "bytes.hex" {
try testFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros});
}
-fn testFmt(expected: []const u8, comptime template: []const u8, args: var) !void {
+fn testFmt(expected: []const u8, comptime template: []const u8, args: anytype) !void {
var buf: [100]u8 = undefined;
const result = try bufPrint(buf[0..], template, args);
if (mem.eql(u8, result, expected)) return;
@@ -1604,7 +1651,7 @@ test "formatIntValue with comptime_int" {
var buf: [20]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
- try formatIntValue(value, "", FormatOptions{}, fbs.outStream());
+ try formatIntValue(value, "", FormatOptions{}, fbs.writer());
std.testing.expect(mem.eql(u8, fbs.getWritten(), "123456789123456789"));
}
@@ -1613,7 +1660,7 @@ test "formatFloatValue with comptime_float" {
var buf: [20]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
- try formatFloatValue(value, "", FormatOptions{}, fbs.outStream());
+ try formatFloatValue(value, "", FormatOptions{}, fbs.writer());
std.testing.expect(mem.eql(u8, fbs.getWritten(), "1.0e+00"));
try testFmt("1.0e+00", "{}", .{value});
@@ -1630,10 +1677,10 @@ test "formatType max_depth" {
self: SelfType,
comptime fmt: []const u8,
options: FormatOptions,
- out_stream: var,
+ writer: anytype,
) !void {
if (fmt.len == 0) {
- return std.fmt.format(out_stream, "({d:.3},{d:.3})", .{ self.x, self.y });
+ return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
@@ -1669,19 +1716,19 @@ test "formatType max_depth" {
var buf: [1000]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
- try formatType(inst, "", FormatOptions{}, fbs.outStream(), 0);
+ try formatType(inst, "", FormatOptions{}, fbs.writer(), 0);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ ... }"));
fbs.reset();
- try formatType(inst, "", FormatOptions{}, fbs.outStream(), 1);
+ try formatType(inst, "", FormatOptions{}, fbs.writer(), 1);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }"));
fbs.reset();
- try formatType(inst, "", FormatOptions{}, fbs.outStream(), 2);
+ try formatType(inst, "", FormatOptions{}, fbs.writer(), 2);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }"));
fbs.reset();
- try formatType(inst, "", FormatOptions{}, fbs.outStream(), 3);
+ try formatType(inst, "", FormatOptions{}, fbs.writer(), 3);
std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ .ptr = TU{ ... } } }, .e = E.Two, .vec = (10.200,2.220) }"));
}
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 262aa4872d..0feaf69d67 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -261,17 +261,7 @@ pub const Dir = struct {
name: []const u8,
kind: Kind,
- pub const Kind = enum {
- BlockDevice,
- CharacterDevice,
- Directory,
- NamedPipe,
- SymLink,
- File,
- UnixDomainSocket,
- Whiteout,
- Unknown,
- };
+ pub const Kind = File.Kind;
};
const IteratorError = error{AccessDenied} || os.UnexpectedError;
@@ -463,6 +453,8 @@ pub const Dir = struct {
pub const Error = IteratorError;
+ /// Memory such as file names referenced in this returned entry becomes invalid
+ /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
pub fn next(self: *Self) Error!?Entry {
start_over: while (true) {
const w = os.windows;
@@ -545,14 +537,15 @@ pub const Dir = struct {
w.EFAULT => unreachable,
w.ENOTDIR => unreachable,
w.EINVAL => unreachable,
+ w.ENOTCAPABLE => return error.AccessDenied,
else => |err| return os.unexpectedErrno(err),
}
if (bufused == 0) return null;
self.index = 0;
self.end_index = bufused;
}
- const entry = @ptrCast(*align(1) os.wasi.dirent_t, &self.buf[self.index]);
- const entry_size = @sizeOf(os.wasi.dirent_t);
+ const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]);
+ const entry_size = @sizeOf(w.dirent_t);
const name_index = self.index + entry_size;
const name = mem.span(self.buf[name_index .. name_index + entry.d_namlen]);
@@ -566,12 +559,12 @@ pub const Dir = struct {
}
const entry_kind = switch (entry.d_type) {
- wasi.FILETYPE_BLOCK_DEVICE => Entry.Kind.BlockDevice,
- wasi.FILETYPE_CHARACTER_DEVICE => Entry.Kind.CharacterDevice,
- wasi.FILETYPE_DIRECTORY => Entry.Kind.Directory,
- wasi.FILETYPE_SYMBOLIC_LINK => Entry.Kind.SymLink,
- wasi.FILETYPE_REGULAR_FILE => Entry.Kind.File,
- wasi.FILETYPE_SOCKET_STREAM, wasi.FILETYPE_SOCKET_DGRAM => Entry.Kind.UnixDomainSocket,
+ w.FILETYPE_BLOCK_DEVICE => Entry.Kind.BlockDevice,
+ w.FILETYPE_CHARACTER_DEVICE => Entry.Kind.CharacterDevice,
+ w.FILETYPE_DIRECTORY => Entry.Kind.Directory,
+ w.FILETYPE_SYMBOLIC_LINK => Entry.Kind.SymLink,
+ w.FILETYPE_REGULAR_FILE => Entry.Kind.File,
+ w.FILETYPE_SOCKET_STREAM, wasi.FILETYPE_SOCKET_DGRAM => Entry.Kind.UnixDomainSocket,
else => Entry.Kind.Unknown,
};
return Entry{
@@ -1109,6 +1102,7 @@ pub const Dir = struct {
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
+ .NOT_A_DIRECTORY => return error.NotDir,
.INVALID_PARAMETER => unreachable,
else => return w.unexpectedStatus(rc),
}
@@ -1119,10 +1113,18 @@ pub const Dir = struct {
/// Delete a file name and possibly the file it refers to, based on an open directory handle.
/// Asserts that the path parameter has no null bytes.
pub fn deleteFile(self: Dir, sub_path: []const u8) DeleteFileError!void {
- os.unlinkat(self.fd, sub_path, 0) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
- else => |e| return e,
- };
+ if (builtin.os.tag == .windows) {
+ const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path);
+ return self.deleteFileW(sub_path_w.span().ptr);
+ } else if (builtin.os.tag == .wasi) {
+ os.unlinkatWasi(self.fd, sub_path, 0) catch |err| switch (err) {
+ error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
+ else => |e| return e,
+ };
+ } else {
+ const sub_path_c = try os.toPosixPath(sub_path);
+ return self.deleteFileZ(&sub_path_c);
+ }
}
pub const deleteFileC = @compileError("deprecated: renamed to deleteFileZ");
@@ -1131,6 +1133,17 @@ pub const Dir = struct {
pub fn deleteFileZ(self: Dir, sub_path_c: [*:0]const u8) DeleteFileError!void {
os.unlinkatZ(self.fd, sub_path_c, 0) catch |err| switch (err) {
error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
+ error.AccessDenied => |e| switch (builtin.os.tag) {
+ // non-Linux POSIX systems return EPERM when trying to delete a directory, so
+ // we need to handle that case specifically and translate the error
+ .macosx, .ios, .freebsd, .netbsd, .dragonfly => {
+ // Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
+ const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT_SYMLINK_NOFOLLOW) catch return e;
+ const is_dir = fstat.mode & os.S_IFMT == os.S_IFDIR;
+ return if (is_dir) error.IsDir else e;
+ },
+ else => return e,
+ },
else => |e| return e,
};
}
@@ -1229,14 +1242,9 @@ pub const Dir = struct {
var file = try self.openFile(file_path, .{});
defer file.close();
- const size = math.cast(usize, try file.getEndPos()) catch math.maxInt(usize);
- if (size > max_bytes) return error.FileTooBig;
-
- const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
- errdefer allocator.free(buf);
+ const stat_size = try file.getEndPos();
- try file.inStream().readNoEof(buf);
- return buf;
+ return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel);
}
pub const DeleteTreeError = error{
@@ -1532,9 +1540,9 @@ pub const Dir = struct {
var size: ?u64 = null;
const mode = options.override_mode orelse blk: {
- const stat = try in_file.stat();
- size = stat.size;
- break :blk stat.mode;
+ const st = try in_file.stat();
+ size = st.size;
+ break :blk st.mode;
};
var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = mode });
@@ -1560,6 +1568,17 @@ pub const Dir = struct {
return AtomicFile.init(dest_path, options.mode, self, false);
}
}
+
+ pub const Stat = File.Stat;
+ pub const StatError = File.StatError;
+
+ pub fn stat(self: Dir) StatError!Stat {
+ const file: File = .{
+ .handle = self.fd,
+ .capable_io_mode = .blocking,
+ };
+ return file.stat();
+ }
};
/// Returns an handle to the current working directory. It is not opened with iteration capability.
@@ -1808,7 +1827,7 @@ pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
// TODO(#4812): Investigate other systems and whether it is possible to get
// this path by trying larger and larger buffers until one succeeds.
var buf: [MAX_PATH_BYTES]u8 = undefined;
- return mem.dupe(allocator, u8, try selfExePath(&buf));
+ return allocator.dupe(u8, try selfExePath(&buf));
}
/// Get the path to the current executable.
@@ -1871,7 +1890,7 @@ pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
// TODO(#4812): Investigate other systems and whether it is possible to get
// this path by trying larger and larger buffers until one succeeds.
var buf: [MAX_PATH_BYTES]u8 = undefined;
- return mem.dupe(allocator, u8, try selfExeDirPath(&buf));
+ return allocator.dupe(u8, try selfExeDirPath(&buf));
}
/// Get the directory path that contains the current executable.
@@ -1893,7 +1912,7 @@ pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
// paths. musl supports passing NULL but restricts the output to PATH_MAX
// anyway.
var buf: [MAX_PATH_BYTES]u8 = undefined;
- return mem.dupe(allocator, u8, try os.realpath(pathname, &buf));
+ return allocator.dupe(u8, try os.realpath(pathname, &buf));
}
test "" {
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index d950a1cfa4..cffc8cf87e 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -29,6 +29,18 @@ pub const File = struct {
pub const Mode = os.mode_t;
pub const INode = os.ino_t;
+ pub const Kind = enum {
+ BlockDevice,
+ CharacterDevice,
+ Directory,
+ NamedPipe,
+ SymLink,
+ File,
+ UnixDomainSocket,
+ Whiteout,
+ Unknown,
+ };
+
pub const default_mode = switch (builtin.os.tag) {
.windows => 0,
.wasi => 0,
@@ -209,7 +221,7 @@ pub const File = struct {
/// TODO: integrate with async I/O
pub fn mode(self: File) ModeError!Mode {
if (builtin.os.tag == .windows) {
- return {};
+ return 0;
}
return (try self.stat()).mode;
}
@@ -219,13 +231,14 @@ pub const File = struct {
/// unique across time, as some file systems may reuse an inode after its file has been deleted.
/// Some systems may change the inode of a file over time.
///
- /// On Linux, the inode _is_ structure that stores the metadata, and the inode _number_ is what
+ /// On Linux, the inode is a structure that stores the metadata, and the inode _number_ is what
/// you see here: the index number of the inode.
///
/// The FileIndex on Windows is similar. It is a number for a file that is unique to each filesystem.
inode: INode,
size: u64,
mode: Mode,
+ kind: Kind,
/// Access time in nanoseconds, relative to UTC 1970-01-01.
atime: i128,
@@ -254,6 +267,7 @@ pub const File = struct {
.inode = info.InternalInformation.IndexNumber,
.size = @bitCast(u64, info.StandardInformation.EndOfFile),
.mode = 0,
+ .kind = if (info.StandardInformation.Directory == 0) .File else .Directory,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
@@ -268,6 +282,27 @@ pub const File = struct {
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
+ .kind = switch (builtin.os.tag) {
+ .wasi => switch (st.filetype) {
+ os.FILETYPE_BLOCK_DEVICE => Kind.BlockDevice,
+ os.FILETYPE_CHARACTER_DEVICE => Kind.CharacterDevice,
+ os.FILETYPE_DIRECTORY => Kind.Directory,
+ os.FILETYPE_SYMBOLIC_LINK => Kind.SymLink,
+ os.FILETYPE_REGULAR_FILE => Kind.File,
+ os.FILETYPE_SOCKET_STREAM, os.FILETYPE_SOCKET_DGRAM => Kind.UnixDomainSocket,
+ else => Kind.Unknown,
+ },
+ else => switch (st.mode & os.S_IFMT) {
+ os.S_IFBLK => Kind.BlockDevice,
+ os.S_IFCHR => Kind.CharacterDevice,
+ os.S_IFDIR => Kind.Directory,
+ os.S_IFIFO => Kind.NamedPipe,
+ os.S_IFLNK => Kind.SymLink,
+ os.S_IFREG => Kind.File,
+ os.S_IFSOCK => Kind.UnixDomainSocket,
+ else => Kind.Unknown,
+ },
+ },
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
@@ -306,6 +341,33 @@ pub const File = struct {
try os.futimens(self.handle, &times);
}
+ /// On success, caller owns returned buffer.
+ /// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+ pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 {
+ return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null);
+ }
+
+ /// On success, caller owns returned buffer.
+ /// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
+ /// Allows specifying alignment and a sentinel value.
+ pub fn readAllAllocOptions(
+ self: File,
+ allocator: *mem.Allocator,
+ stat_size: u64,
+ max_bytes: usize,
+ comptime alignment: u29,
+ comptime optional_sentinel: ?u8,
+ ) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
+ const size = math.cast(usize, stat_size) catch math.maxInt(usize);
+ if (size > max_bytes) return error.FileTooBig;
+
+ const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
+ errdefer allocator.free(buf);
+
+ try self.reader().readNoEof(buf);
+ return buf;
+ }
+
pub const ReadError = os.ReadError;
pub const PReadError = os.PReadError;
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index 34326f2872..2176498feb 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -1034,7 +1034,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
var from_it = mem.tokenize(resolved_from, "/\\");
var to_it = mem.tokenize(resolved_to, "/\\");
while (true) {
- const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -1085,7 +1085,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.tokenize(resolved_from, "/");
var to_it = mem.tokenize(resolved_to, "/");
while (true) {
- const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 875565fc54..a3cf2e8002 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1,7 +1,157 @@
const std = @import("../std.zig");
+const testing = std.testing;
const builtin = std.builtin;
const fs = std.fs;
+const mem = std.mem;
+const wasi = std.os.wasi;
+
+const ArenaAllocator = std.heap.ArenaAllocator;
+const Dir = std.fs.Dir;
const File = std.fs.File;
+const tmpDir = testing.tmpDir;
+
+test "Dir.Iterator" {
+ var tmp_dir = tmpDir(.{ .iterate = true });
+ defer tmp_dir.cleanup();
+
+ // First, create a couple of entries to iterate over.
+ const file = try tmp_dir.dir.createFile("some_file", .{});
+ file.close();
+
+ try tmp_dir.dir.makeDir("some_dir");
+
+ var arena = ArenaAllocator.init(testing.allocator);
+ defer arena.deinit();
+
+ var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);
+
+ // Create iterator.
+ var iter = tmp_dir.dir.iterate();
+ while (try iter.next()) |entry| {
+ // We cannot just store `entry` as on Windows, we're re-using the name buffer
+ // which means we'll actually share the `name` pointer between entries!
+ const name = try arena.allocator.dupe(u8, entry.name);
+ try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
+ }
+
+ testing.expect(entries.items.len == 2); // note that the Iterator skips '.' and '..'
+ testing.expect(contains(&entries, Dir.Entry{ .name = "some_file", .kind = Dir.Entry.Kind.File }));
+ testing.expect(contains(&entries, Dir.Entry{ .name = "some_dir", .kind = Dir.Entry.Kind.Directory }));
+}
+
+fn entry_eql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
+ return mem.eql(u8, lhs.name, rhs.name) and lhs.kind == rhs.kind;
+}
+
+fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
+ for (entries.items) |entry| {
+ if (entry_eql(entry, el)) return true;
+ }
+ return false;
+}
+
+test "readAllAlloc" {
+ var tmp_dir = tmpDir(.{});
+ defer tmp_dir.cleanup();
+
+ var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
+ defer file.close();
+
+ const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024);
+ defer testing.allocator.free(buf1);
+ testing.expect(buf1.len == 0);
+
+ const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
+ try file.writeAll(write_buf);
+ try file.seekTo(0);
+ const file_size = try file.getEndPos();
+
+ // max_bytes > file_size
+ const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024);
+ defer testing.allocator.free(buf2);
+ testing.expectEqual(write_buf.len, buf2.len);
+ testing.expect(std.mem.eql(u8, write_buf, buf2));
+ try file.seekTo(0);
+
+ // max_bytes == file_size
+ const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len);
+ defer testing.allocator.free(buf3);
+ testing.expectEqual(write_buf.len, buf3.len);
+ testing.expect(std.mem.eql(u8, write_buf, buf3));
+
+ // max_bytes < file_size
+ testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1));
+}
+
+test "directory operations on files" {
+ var tmp_dir = tmpDir(.{});
+ defer tmp_dir.cleanup();
+
+ const test_file_name = "test_file";
+
+ var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true });
+ file.close();
+
+ testing.expectError(error.PathAlreadyExists, tmp_dir.dir.makeDir(test_file_name));
+ testing.expectError(error.NotDir, tmp_dir.dir.openDir(test_file_name, .{}));
+ testing.expectError(error.NotDir, tmp_dir.dir.deleteDir(test_file_name));
+
+ if (builtin.os.tag != .wasi) {
+ // TODO: use Dir's realpath function once that exists
+ const absolute_path = blk: {
+ const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_file_name });
+ defer testing.allocator.free(relative_path);
+ break :blk try fs.realpathAlloc(testing.allocator, relative_path);
+ };
+ defer testing.allocator.free(absolute_path);
+
+ testing.expectError(error.PathAlreadyExists, fs.makeDirAbsolute(absolute_path));
+ testing.expectError(error.NotDir, fs.deleteDirAbsolute(absolute_path));
+ }
+
+ // ensure the file still exists and is a file as a sanity check
+ file = try tmp_dir.dir.openFile(test_file_name, .{});
+ const stat = try file.stat();
+ testing.expect(stat.kind == .File);
+ file.close();
+}
+
+test "file operations on directories" {
+ var tmp_dir = tmpDir(.{});
+ defer tmp_dir.cleanup();
+
+ const test_dir_name = "test_dir";
+
+ try tmp_dir.dir.makeDir(test_dir_name);
+
+ testing.expectError(error.IsDir, tmp_dir.dir.createFile(test_dir_name, .{}));
+ testing.expectError(error.IsDir, tmp_dir.dir.deleteFile(test_dir_name));
+ // Currently, WASI will return error.Unexpected (via ENOTCAPABLE) when attempting fd_read on a directory handle.
+ // TODO: Re-enable on WASI once https://github.com/bytecodealliance/wasmtime/issues/1935 is resolved.
+ if (builtin.os.tag != .wasi) {
+ testing.expectError(error.IsDir, tmp_dir.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
+ }
+ // Note: The `.write = true` is necessary to ensure the error occurs on all platforms.
+ // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732
+ testing.expectError(error.IsDir, tmp_dir.dir.openFile(test_dir_name, .{ .write = true }));
+
+ if (builtin.os.tag != .wasi) {
+ // TODO: use Dir's realpath function once that exists
+ const absolute_path = blk: {
+ const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_dir_name });
+ defer testing.allocator.free(relative_path);
+ break :blk try fs.realpathAlloc(testing.allocator, relative_path);
+ };
+ defer testing.allocator.free(absolute_path);
+
+ testing.expectError(error.IsDir, fs.createFileAbsolute(absolute_path, .{}));
+ testing.expectError(error.IsDir, fs.deleteFileAbsolute(absolute_path));
+ }
+
+ // ensure the directory still exists as a sanity check
+ var dir = try tmp_dir.dir.openDir(test_dir_name, .{});
+ dir.close();
+}
test "openSelfExe" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
@@ -10,6 +160,163 @@ test "openSelfExe" {
self_exe_file.close();
}
+test "makePath, put some files in it, deleteTree" {
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.makePath("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c");
+ try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c" ++ fs.path.sep_str ++ "file.txt", "nonsense");
+ try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
+ try tmp.dir.deleteTree("os_test_tmp");
+ if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
+ @panic("expected error");
+ } else |err| {
+ testing.expect(err == error.FileNotFound);
+ }
+}
+
+test "access file" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.makePath("os_test_tmp");
+ if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
+ @panic("expected error");
+ } else |err| {
+ testing.expect(err == error.FileNotFound);
+ }
+
+ try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", "");
+ try tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{});
+ try tmp.dir.deleteTree("os_test_tmp");
+}
+
+test "sendfile" {
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.makePath("os_test_tmp");
+ defer tmp.dir.deleteTree("os_test_tmp") catch {};
+
+ var dir = try tmp.dir.openDir("os_test_tmp", .{});
+ defer dir.close();
+
+ const line1 = "line1\n";
+ const line2 = "second line\n";
+ var vecs = [_]std.os.iovec_const{
+ .{
+ .iov_base = line1,
+ .iov_len = line1.len,
+ },
+ .{
+ .iov_base = line2,
+ .iov_len = line2.len,
+ },
+ };
+
+ var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
+ defer src_file.close();
+
+ try src_file.writevAll(&vecs);
+
+ var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
+ defer dest_file.close();
+
+ const header1 = "header1\n";
+ const header2 = "second header\n";
+ const trailer1 = "trailer1\n";
+ const trailer2 = "second trailer\n";
+ var hdtr = [_]std.os.iovec_const{
+ .{
+ .iov_base = header1,
+ .iov_len = header1.len,
+ },
+ .{
+ .iov_base = header2,
+ .iov_len = header2.len,
+ },
+ .{
+ .iov_base = trailer1,
+ .iov_len = trailer1.len,
+ },
+ .{
+ .iov_base = trailer2,
+ .iov_len = trailer2.len,
+ },
+ };
+
+ var written_buf: [100]u8 = undefined;
+ try dest_file.writeFileAll(src_file, .{
+ .in_offset = 1,
+ .in_len = 10,
+ .headers_and_trailers = &hdtr,
+ .header_count = 2,
+ });
+ const amt = try dest_file.preadAll(&written_buf, 0);
+ testing.expect(mem.eql(u8, written_buf[0..amt], "header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n"));
+}
+
+test "fs.copyFile" {
+ const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
+ const src_file = "tmp_test_copy_file.txt";
+ const dest_file = "tmp_test_copy_file2.txt";
+ const dest_file2 = "tmp_test_copy_file3.txt";
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.writeFile(src_file, data);
+ defer tmp.dir.deleteFile(src_file) catch {};
+
+ try tmp.dir.copyFile(src_file, tmp.dir, dest_file, .{});
+ defer tmp.dir.deleteFile(dest_file) catch {};
+
+ try tmp.dir.copyFile(src_file, tmp.dir, dest_file2, .{ .override_mode = File.default_mode });
+ defer tmp.dir.deleteFile(dest_file2) catch {};
+
+ try expectFileContents(tmp.dir, dest_file, data);
+ try expectFileContents(tmp.dir, dest_file2, data);
+}
+
+fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
+ const contents = try dir.readFileAlloc(testing.allocator, file_path, 1000);
+ defer testing.allocator.free(contents);
+
+ testing.expectEqualSlices(u8, data, contents);
+}
+
+test "AtomicFile" {
+ const test_out_file = "tmp_atomic_file_test_dest.txt";
+ const test_content =
+ \\ hello!
+ \\ this is a test file
+ ;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ {
+ var af = try tmp.dir.atomicFile(test_out_file, .{});
+ defer af.deinit();
+ try af.file.writeAll(test_content);
+ try af.finish();
+ }
+ const content = try tmp.dir.readFileAlloc(testing.allocator, test_out_file, 9999);
+ defer testing.allocator.free(content);
+ testing.expect(mem.eql(u8, content, test_content));
+
+ try tmp.dir.deleteFile(test_out_file);
+}
+
+test "realpath" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ testing.expectError(error.FileNotFound, fs.realpath("definitely_bogus_does_not_exist1234", &buf));
+}
+
const FILE_LOCK_TEST_SLEEP_TIME = 5 * std.time.ns_per_ms;
test "open file with exclusive nonblocking lock twice" {
@@ -116,7 +423,7 @@ test "create file, lock and read from multiple process at once" {
test "open file with exclusive nonblocking lock twice (absolute paths)" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- const allocator = std.testing.allocator;
+ const allocator = testing.allocator;
const file_paths: [1][]const u8 = .{"zig-test-absolute-paths.txt"};
const filename = try fs.path.resolve(allocator, &file_paths);
@@ -126,7 +433,7 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
const file2 = fs.createFileAbsolute(filename, .{ .lock = .Exclusive, .lock_nonblocking = true });
file1.close();
- std.testing.expectError(error.WouldBlock, file2);
+ testing.expectError(error.WouldBlock, file2);
try fs.deleteFileAbsolute(filename);
}
@@ -187,7 +494,7 @@ const FileLockTestContext = struct {
};
fn run_lock_file_test(contexts: []FileLockTestContext) !void {
- var threads = std.ArrayList(*std.Thread).init(std.testing.allocator);
+ var threads = std.ArrayList(*std.Thread).init(testing.allocator);
defer {
for (threads.items) |thread| {
thread.wait();
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index f08c74c129..149ede252d 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -1,17 +1,44 @@
const std = @import("std");
const os = std.os;
const mem = std.mem;
+const math = std.math;
const Allocator = mem.Allocator;
usingnamespace std.os.wasi;
-/// Type of WASI preopen.
+/// Type-tag of WASI preopen.
///
/// WASI currently offers only `Dir` as a valid preopen resource.
-pub const PreopenType = enum {
+pub const PreopenTypeTag = enum {
Dir,
};
+/// Type of WASI preopen.
+///
+/// WASI currently offers only `Dir` as a valid preopen resource.
+pub const PreopenType = union(PreopenTypeTag) {
+ /// Preopened directory type.
+ Dir: []const u8,
+
+ const Self = @This();
+
+ pub fn eql(self: Self, other: PreopenType) bool {
+ if (!mem.eql(u8, @tagName(self), @tagName(other))) return false;
+
+ switch (self) {
+ PreopenTypeTag.Dir => |this_path| return mem.eql(u8, this_path, other.Dir),
+ }
+ }
+
+ pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void {
+ try out_stream.print("PreopenType{{ ", .{});
+ switch (self) {
+ PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}),
+ }
+ return out_stream.print(" }}", .{});
+ }
+};
+
/// WASI preopen struct. This struct consists of a WASI file descriptor
/// and type of WASI preopen. It can be obtained directly from the WASI
/// runtime using `PreopenList.populate()` method.
@@ -20,29 +47,15 @@ pub const Preopen = struct {
fd: fd_t,
/// Type of the preopen.
- @"type": union(PreopenType) {
- /// Path to a preopened directory.
- Dir: []const u8,
- },
+ @"type": PreopenType,
- const Self = @This();
-
- /// Construct new `Preopen` instance of type `PreopenType.Dir` from
- /// WASI file descriptor and WASI path.
- pub fn newDir(fd: fd_t, path: []const u8) Self {
- return Self{
+ /// Construct new `Preopen` instance.
+ pub fn new(fd: fd_t, preopen_type: PreopenType) Preopen {
+ return Preopen{
.fd = fd,
- .@"type" = .{ .Dir = path },
+ .@"type" = preopen_type,
};
}
-
- pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: var) !void {
- try out_stream.print("{{ .fd = {}, ", .{self.fd});
- switch (self.@"type") {
- PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}),
- }
- return out_stream.print(" }}", .{});
- }
};
/// Dynamically-sized array list of WASI preopens. This struct is a
@@ -60,7 +73,7 @@ pub const PreopenList = struct {
const Self = @This();
- pub const Error = os.UnexpectedError || Allocator.Error;
+ pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;
/// Deinitialize with `deinit`.
pub fn init(allocator: *Allocator) Self {
@@ -82,6 +95,12 @@ pub const PreopenList = struct {
///
/// If called more than once, it will clear its contents every time before
/// issuing the syscalls.
+ ///
 + /// In the unlikely event of overflowing the number of available file descriptors,
+ /// returns `error.Overflow`. In this case, even though an error condition was reached
+ /// the preopen list still contains all valid preopened file descriptors that are valid
+ /// for use. Therefore, it is fine to call `find`, `asSlice`, or `toOwnedSlice`. Finally,
+ /// `deinit` still must be called!
pub fn populate(self: *Self) Error!void {
// Clear contents if we're being called again
for (self.toOwnedSlice()) |preopen| {
@@ -98,6 +117,7 @@ pub const PreopenList = struct {
ESUCCESS => {},
ENOTSUP => {
// not a preopen, so keep going
+ fd = try math.add(fd_t, fd, 1);
continue;
},
EBADF => {
@@ -113,24 +133,18 @@ pub const PreopenList = struct {
ESUCCESS => {},
else => |err| return os.unexpectedErrno(err),
}
- const preopen = Preopen.newDir(fd, path_buf);
+ const preopen = Preopen.new(fd, PreopenType{ .Dir = path_buf });
try self.buffer.append(preopen);
- fd += 1;
+ fd = try math.add(fd_t, fd, 1);
}
}
- /// Find preopen by path. If the preopen exists, return it.
+ /// Find preopen by type. If the preopen exists, return it.
/// Otherwise, return `null`.
- ///
- /// TODO make the function more generic by searching by `PreopenType` union. This will
- /// be needed in the future when WASI extends its capabilities to resources
- /// other than preopened directories.
- pub fn find(self: Self, path: []const u8) ?*const Preopen {
- for (self.buffer.items) |preopen| {
- switch (preopen.@"type") {
- PreopenType.Dir => |preopen_path| {
- if (mem.eql(u8, path, preopen_path)) return &preopen;
- },
+ pub fn find(self: Self, preopen_type: PreopenType) ?*const Preopen {
+ for (self.buffer.items) |*preopen| {
+ if (preopen.@"type".eql(preopen_type)) {
+ return preopen;
}
}
return null;
@@ -156,7 +170,7 @@ test "extracting WASI preopens" {
try preopens.populate();
std.testing.expectEqual(@as(usize, 1), preopens.asSlice().len);
- const preopen = preopens.find(".") orelse unreachable;
- std.testing.expect(std.mem.eql(u8, ".", preopen.@"type".Dir));
+ const preopen = preopens.find(PreopenType{ .Dir = "." }) orelse unreachable;
+ std.testing.expect(preopen.@"type".eql(PreopenType{ .Dir = "." }));
std.testing.expectEqual(@as(usize, 3), preopen.fd);
}
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
index 8c5a983735..b161e45c71 100644
--- a/lib/std/fs/watch.zig
+++ b/lib/std/fs/watch.zig
@@ -360,7 +360,7 @@ pub fn Watch(comptime V: type) type {
fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
// TODO we might need to convert dirname and basename to canonical file paths ("short"?)
- const dirname = try std.mem.dupe(self.allocator, u8, std.fs.path.dirname(file_path) orelse ".");
+ const dirname = try self.allocator.dupe(u8, std.fs.path.dirname(file_path) orelse ".");
var dirname_consumed = false;
defer if (!dirname_consumed) self.allocator.free(dirname);
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index a33b23354b..a3e1a390c2 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -21,7 +21,7 @@ pub const HashStrategy = enum {
};
/// Helper function to hash a pointer and mutate the strategy if needed.
-pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void {
+pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
const info = @typeInfo(@TypeOf(key));
switch (info.Pointer.size) {
@@ -53,7 +53,7 @@ pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void {
}
/// Helper function to hash a set of contiguous objects, from an array or slice.
-pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void {
+pub fn hashArray(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
switch (strat) {
.Shallow => {
// TODO detect via a trait when Key has no padding bits to
@@ -73,7 +73,7 @@ pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void {
/// Provides generic hashing for any eligible type.
/// Strategy is provided to determine if pointers should be followed or not.
-pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
+pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
const Key = @TypeOf(key);
switch (@typeInfo(Key)) {
.NoReturn,
@@ -161,7 +161,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
/// Provides generic hashing for any eligible type.
/// Only hashes `key` itself, pointers are not followed.
/// Slices are rejected to avoid ambiguity on the user's intention.
-pub fn autoHash(hasher: var, key: var) void {
+pub fn autoHash(hasher: anytype, key: anytype) void {
const Key = @TypeOf(key);
if (comptime meta.trait.isSlice(Key)) {
comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated
@@ -181,28 +181,28 @@ pub fn autoHash(hasher: var, key: var) void {
const testing = std.testing;
const Wyhash = std.hash.Wyhash;
-fn testHash(key: var) u64 {
+fn testHash(key: anytype) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .Shallow);
return hasher.final();
}
-fn testHashShallow(key: var) u64 {
+fn testHashShallow(key: anytype) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .Shallow);
return hasher.final();
}
-fn testHashDeep(key: var) u64 {
+fn testHashDeep(key: anytype) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .Deep);
return hasher.final();
}
-fn testHashDeepRecursive(key: var) u64 {
+fn testHashDeepRecursive(key: anytype) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .DeepRecursive);
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index 255c98e409..5f8a15831c 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -88,7 +88,7 @@ const Result = struct {
const block_size: usize = 8 * 8192;
-pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
+pub fn benchmarkHash(comptime H: anytype, bytes: usize) !Result {
var h = blk: {
if (H.init_u8s) |init| {
break :blk H.ty.init(init);
@@ -119,7 +119,7 @@ pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
};
}
-pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result {
+pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize) !Result {
const key_count = bytes / key_size;
var block: [block_size]u8 = undefined;
prng.random.bytes(block[0..]);
@@ -172,7 +172,7 @@ fn mode(comptime x: comptime_int) comptime_int {
}
pub fn main() !void {
- const stdout = std.io.getStdOut().outStream();
+ const stdout = std.io.getStdOut().writer();
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
@@ -248,13 +248,13 @@ pub fn main() !void {
if (H.has_iterative_api) {
prng.seed(seed);
const result = try benchmarkHash(H, count);
- try stdout.print(" iterative: {:4} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
+ try stdout.print(" iterative: {:5} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
}
if (!test_iterative_only) {
prng.seed(seed);
const result_small = try benchmarkHashSmallKeys(H, key_size, count);
- try stdout.print(" small keys: {:4} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
+ try stdout.print(" small keys: {:5} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
}
}
}
diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig
index a717303090..73b94acbd2 100644
--- a/lib/std/hash/cityhash.zig
+++ b/lib/std/hash/cityhash.zig
@@ -354,7 +354,7 @@ pub const CityHash64 = struct {
}
};
-fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 {
+fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
const hashbytes = hashbits / 8;
var key: [256]u8 = undefined;
var hashes: [hashbytes * 256]u8 = undefined;
diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig
index 96efc8b9c1..effa13ad69 100644
--- a/lib/std/hash/murmur.zig
+++ b/lib/std/hash/murmur.zig
@@ -279,7 +279,7 @@ pub const Murmur3_32 = struct {
}
};
-fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 {
+fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
const hashbytes = hashbits / 8;
var key: [256]u8 = undefined;
var hashes: [hashbytes * 256]u8 = undefined;
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index bcd4280153..3952ecb4b2 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -9,17 +9,23 @@ const autoHash = std.hash.autoHash;
const Wyhash = std.hash.Wyhash;
const Allocator = mem.Allocator;
const builtin = @import("builtin");
-
-const want_modification_safety = std.debug.runtime_safety;
-const debug_u32 = if (want_modification_safety) u32 else void;
+const hash_map = @This();
pub fn AutoHashMap(comptime K: type, comptime V: type) type {
- return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
+ return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K));
+}
+
+pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
+ return HashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K));
}
/// Builtin hashmap for strings as keys.
pub fn StringHashMap(comptime V: type) type {
- return HashMap([]const u8, V, hashString, eqlString);
+ return HashMap([]const u8, V, hashString, eqlString, true);
+}
+
+pub fn StringHashMapUnmanaged(comptime V: type) type {
+ return HashMapUnmanaged([]const u8, V, hashString, eqlString, true);
}
pub fn eqlString(a: []const u8, b: []const u8) bool {
@@ -30,422 +36,860 @@ pub fn hashString(s: []const u8) u32 {
return @truncate(u32, std.hash.Wyhash.hash(0, s));
}
-pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
+/// Insertion order is preserved.
+/// Deletions perform a "swap removal" on the entries list.
+/// Modifying the hash map while iterating is allowed, however one must understand
+/// the (well defined) behavior when mixing insertions and deletions with iteration.
+/// For a hash map that can be initialized directly that does not store an Allocator
+/// field, see `HashMapUnmanaged`.
+/// When `store_hash` is `false`, this data structure is biased towards cheap `eql`
+/// functions. It does not store each item's hash in the table. Setting `store_hash`
+/// to `true` incurs slightly more memory cost by storing each key's hash in the table
+/// but only has to call `eql` for hash collisions.
+pub fn HashMap(
+ comptime K: type,
+ comptime V: type,
+ comptime hash: fn (key: K) u32,
+ comptime eql: fn (a: K, b: K) bool,
+ comptime store_hash: bool,
+) type {
return struct {
- entries: []Entry,
- size: usize,
- max_distance_from_start_index: usize,
+ unmanaged: Unmanaged,
allocator: *Allocator,
- /// This is used to detect bugs where a hashtable is edited while an iterator is running.
- modification_count: debug_u32,
-
- const Self = @This();
-
- /// A *KV is a mutable pointer into this HashMap's internal storage.
- /// Modifying the key is undefined behavior.
- /// Modifying the value is harmless.
- /// *KV pointers become invalid whenever this HashMap is modified,
- /// and then any access to the *KV is undefined behavior.
- pub const KV = struct {
- key: K,
- value: V,
- };
-
- const Entry = struct {
- used: bool,
- distance_from_start_index: usize,
- kv: KV,
- };
-
- pub const GetOrPutResult = struct {
- kv: *KV,
- found_existing: bool,
- };
+ pub const Unmanaged = HashMapUnmanaged(K, V, hash, eql, store_hash);
+ pub const Entry = Unmanaged.Entry;
+ pub const Hash = Unmanaged.Hash;
+ pub const GetOrPutResult = Unmanaged.GetOrPutResult;
+ /// Deprecated. Iterate using `items`.
pub const Iterator = struct {
hm: *const Self,
- // how many items have we returned
- count: usize,
- // iterator through the entry array
+ /// Iterator through the entry array.
index: usize,
- // used to detect concurrent modification
- initial_modification_count: debug_u32,
- pub fn next(it: *Iterator) ?*KV {
- if (want_modification_safety) {
- assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
- }
- if (it.count >= it.hm.size) return null;
- while (it.index < it.hm.entries.len) : (it.index += 1) {
- const entry = &it.hm.entries[it.index];
- if (entry.used) {
- it.index += 1;
- it.count += 1;
- return &entry.kv;
- }
- }
- unreachable; // no next item
+ pub fn next(it: *Iterator) ?*Entry {
+ if (it.index >= it.hm.unmanaged.entries.items.len) return null;
+ const result = &it.hm.unmanaged.entries.items[it.index];
+ it.index += 1;
+ return result;
}
- // Reset the iterator to the initial index
+ /// Reset the iterator to the initial index
pub fn reset(it: *Iterator) void {
- it.count = 0;
it.index = 0;
- // Resetting the modification count too
- it.initial_modification_count = it.hm.modification_count;
}
};
+ const Self = @This();
+ const Index = Unmanaged.Index;
+
pub fn init(allocator: *Allocator) Self {
- return Self{
- .entries = &[_]Entry{},
+ return .{
+ .unmanaged = .{},
.allocator = allocator,
- .size = 0,
- .max_distance_from_start_index = 0,
- .modification_count = if (want_modification_safety) 0 else {},
};
}
- pub fn deinit(hm: Self) void {
- hm.allocator.free(hm.entries);
+ pub fn deinit(self: *Self) void {
+ self.unmanaged.deinit(self.allocator);
+ self.* = undefined;
}
- pub fn clear(hm: *Self) void {
- for (hm.entries) |*entry| {
- entry.used = false;
- }
- hm.size = 0;
- hm.max_distance_from_start_index = 0;
- hm.incrementModificationCount();
+ pub fn clearRetainingCapacity(self: *Self) void {
+ return self.unmanaged.clearRetainingCapacity();
}
+ pub fn clearAndFree(self: *Self) void {
+ return self.unmanaged.clearAndFree(self.allocator);
+ }
+
+ /// Deprecated. Use `items().len`.
pub fn count(self: Self) usize {
- return self.size;
+ return self.items().len;
+ }
+
+ /// Deprecated. Iterate using `items`.
+ pub fn iterator(self: *const Self) Iterator {
+ return Iterator{
+ .hm = self,
+ .index = 0,
+ };
}
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
- /// kv pointer points to it, and found_existing is true.
+ /// `Entry` pointer points to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
- /// the kv pointer points to it. Caller should then initialize
- /// the data.
+ /// the `Entry` pointer points to it. Caller should then initialize
+ /// the value (but not the key).
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
- // TODO this implementation can be improved - we should only
- // have to hash once and find the entry once.
- if (self.get(key)) |kv| {
- return GetOrPutResult{
- .kv = kv,
- .found_existing = true,
- };
- }
- self.incrementModificationCount();
- try self.autoCapacity();
- const put_result = self.internalPut(key);
- assert(put_result.old_kv == null);
- return GetOrPutResult{
- .kv = &put_result.new_entry.kv,
- .found_existing = false,
- };
+ return self.unmanaged.getOrPut(self.allocator, key);
}
- pub fn getOrPutValue(self: *Self, key: K, value: V) !*KV {
- const res = try self.getOrPut(key);
- if (!res.found_existing)
- res.kv.value = value;
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointer points to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointer points to it. Caller should then initialize
+ /// the value (but not the key).
+ /// If a new entry needs to be stored, this function asserts there
+ /// is enough capacity to store it.
+ pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
+ return self.unmanaged.getOrPutAssumeCapacity(key);
+ }
+
+ pub fn getOrPutValue(self: *Self, key: K, value: V) !*Entry {
+ return self.unmanaged.getOrPutValue(self.allocator, key, value);
+ }
+
+ /// Increases capacity, guaranteeing that insertions up until the
+ /// `expected_count` will not cause an allocation, and therefore cannot fail.
+ pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ return self.unmanaged.ensureCapacity(self.allocator, new_capacity);
+ }
+
+ /// Returns the number of total elements which may be present before it is
+ /// no longer guaranteed that no allocations will be performed.
+ pub fn capacity(self: *Self) usize {
+ return self.unmanaged.capacity();
+ }
+
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPut`.
+ pub fn put(self: *Self, key: K, value: V) !void {
+ return self.unmanaged.put(self.allocator, key, value);
+ }
+
+ /// Inserts a key-value pair into the hash map, asserting that no previous
+ /// entry with the same key is already present
+ pub fn putNoClobber(self: *Self, key: K, value: V) !void {
+ return self.unmanaged.putNoClobber(self.allocator, key, value);
+ }
+
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
+ return self.unmanaged.putAssumeCapacity(key, value);
+ }
+
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Asserts that it does not clobber any existing data.
+ /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
+ return self.unmanaged.putAssumeCapacityNoClobber(key, value);
+ }
- return res.kv;
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ pub fn fetchPut(self: *Self, key: K, value: V) !?Entry {
+ return self.unmanaged.fetchPut(self.allocator, key, value);
}
- fn optimizedCapacity(expected_count: usize) usize {
- // ensure that the hash map will be at most 60% full if
- // expected_count items are put into it
- var optimized_capacity = expected_count * 5 / 3;
- // an overflow here would mean the amount of memory required would not
- // be representable in the address space
- return math.ceilPowerOfTwo(usize, optimized_capacity) catch unreachable;
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
 + /// If insertion happens, asserts there is enough capacity without allocating.
+ pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
+ return self.unmanaged.fetchPutAssumeCapacity(key, value);
}
- /// Increases capacity so that the hash map will be at most
- /// 60% full when expected_count items are put into it
- pub fn ensureCapacity(self: *Self, expected_count: usize) !void {
- if (expected_count == 0) return;
- const optimized_capacity = optimizedCapacity(expected_count);
- return self.ensureCapacityExact(optimized_capacity);
+ pub fn getEntry(self: Self, key: K) ?*Entry {
+ return self.unmanaged.getEntry(key);
}
- /// Sets the capacity to the new capacity if the new
- /// capacity is greater than the current capacity.
- /// New capacity must be a power of two.
- fn ensureCapacityExact(self: *Self, new_capacity: usize) !void {
- // capacity must always be a power of two to allow for modulo
- // optimization in the constrainIndex fn
- assert(math.isPowerOfTwo(new_capacity));
+ pub fn get(self: Self, key: K) ?V {
+ return self.unmanaged.get(key);
+ }
+
+ pub fn contains(self: Self, key: K) bool {
+ return self.unmanaged.contains(key);
+ }
+
+ /// If there is an `Entry` with a matching key, it is deleted from
+ /// the hash map, and then returned from this function.
+ pub fn remove(self: *Self, key: K) ?Entry {
+ return self.unmanaged.remove(key);
+ }
+
+ /// Asserts there is an `Entry` with matching key, deletes it from the hash map,
+ /// and discards it.
+ pub fn removeAssertDiscard(self: *Self, key: K) void {
+ return self.unmanaged.removeAssertDiscard(key);
+ }
+
+ pub fn items(self: Self) []Entry {
+ return self.unmanaged.items();
+ }
+
+ pub fn clone(self: Self) !Self {
+ var other = try self.unmanaged.clone(self.allocator);
+ return other.promote(self.allocator);
+ }
+ };
+}
+
+/// General purpose hash table.
+/// Insertion order is preserved.
+/// Deletions perform a "swap removal" on the entries list.
+/// Modifying the hash map while iterating is allowed, however one must understand
+/// the (well defined) behavior when mixing insertions and deletions with iteration.
+/// This type does not store an Allocator field - the Allocator must be passed in
+/// with each function call that requires it. See `HashMap` for a type that stores
+/// an Allocator field for convenience.
+/// Can be initialized directly using the default field values.
+/// This type is designed to have low overhead for small numbers of entries. When
+/// `store_hash` is `false` and the number of entries in the map is less than 9,
+/// the overhead cost of using `HashMapUnmanaged` rather than `std.ArrayList` is
+/// only a single pointer-sized integer.
+/// When `store_hash` is `false`, this data structure is biased towards cheap `eql`
+/// functions. It does not store each item's hash in the table. Setting `store_hash`
+/// to `true` incurs slightly more memory cost by storing each key's hash in the table
+/// but guarantees only one call to `eql` per insertion/deletion.
+pub fn HashMapUnmanaged(
+ comptime K: type,
+ comptime V: type,
+ comptime hash: fn (key: K) u32,
+ comptime eql: fn (a: K, b: K) bool,
+ comptime store_hash: bool,
+) type {
+ return struct {
+ /// It is permitted to access this field directly.
+ entries: std.ArrayListUnmanaged(Entry) = .{},
+
+ /// When entries length is less than `linear_scan_max`, this remains `null`.
+ /// Once entries length grows big enough, this field is allocated. There is
+ /// an IndexHeader followed by an array of Index(I) structs, where I is defined
+ /// by how many total indexes there are.
+ index_header: ?*IndexHeader = null,
+
+ /// Modifying the key is illegal behavior.
+ /// Modifying the value is allowed.
+ /// Entry pointers become invalid whenever this HashMap is modified,
+ /// unless `ensureCapacity` was previously used.
+ pub const Entry = struct {
+ /// This field is `void` if `store_hash` is `false`.
+ hash: Hash,
+ key: K,
+ value: V,
+ };
+
+ pub const Hash = if (store_hash) u32 else void;
+
+ pub const GetOrPutResult = struct {
+ entry: *Entry,
+ found_existing: bool,
+ };
+
+ pub const Managed = HashMap(K, V, hash, eql, store_hash);
+
+ const Self = @This();
+
+ const linear_scan_max = 8;
- if (new_capacity <= self.entries.len) {
- return;
+ pub fn promote(self: Self, allocator: *Allocator) Managed {
+ return .{
+ .unmanaged = self,
+ .allocator = allocator,
+ };
+ }
+
+ pub fn deinit(self: *Self, allocator: *Allocator) void {
+ self.entries.deinit(allocator);
+ if (self.index_header) |header| {
+ header.free(allocator);
+ }
+ self.* = undefined;
+ }
+
+ pub fn clearRetainingCapacity(self: *Self) void {
+ self.entries.items.len = 0;
+ if (self.index_header) |header| {
+ header.max_distance_from_start_index = 0;
+ switch (header.capacityIndexType()) {
+ .u8 => mem.set(Index(u8), header.indexes(u8), Index(u8).empty),
+ .u16 => mem.set(Index(u16), header.indexes(u16), Index(u16).empty),
+ .u32 => mem.set(Index(u32), header.indexes(u32), Index(u32).empty),
+ .usize => mem.set(Index(usize), header.indexes(usize), Index(usize).empty),
+ }
+ }
+ }
+
+ pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ self.entries.shrink(allocator, 0);
+ if (self.index_header) |header| {
+ header.free(allocator);
+ self.index_header = null;
}
+ }
+
+ /// If key exists this function cannot fail.
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointer points to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointer points to it. Caller should then initialize
+ /// the value (but not the key).
+ pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ self.ensureCapacity(allocator, self.entries.items.len + 1) catch |err| {
+ // "If key exists this function cannot fail."
+ return GetOrPutResult{
+ .entry = self.getEntry(key) orelse return err,
+ .found_existing = true,
+ };
+ };
+ return self.getOrPutAssumeCapacity(key);
+ }
- const old_entries = self.entries;
- try self.initCapacity(new_capacity);
- self.incrementModificationCount();
- if (old_entries.len > 0) {
- // dump all of the old elements into the new table
- for (old_entries) |*old_entry| {
- if (old_entry.used) {
- self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value;
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointer points to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointer points to it. Caller should then initialize
+ /// the value (but not the key).
+ /// If a new entry needs to be stored, this function asserts there
+ /// is enough capacity to store it.
+ pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
+ const header = self.index_header orelse {
+ // Linear scan.
+ const h = if (store_hash) hash(key) else {};
+ for (self.entries.items) |*item| {
+ if (item.hash == h and eql(key, item.key)) {
+ return GetOrPutResult{
+ .entry = item,
+ .found_existing = true,
+ };
}
}
- self.allocator.free(old_entries);
+ const new_entry = self.entries.addOneAssumeCapacity();
+ new_entry.* = .{
+ .hash = if (store_hash) h else {},
+ .key = key,
+ .value = undefined,
+ };
+ return GetOrPutResult{
+ .entry = new_entry,
+ .found_existing = false,
+ };
+ };
+
+ switch (header.capacityIndexType()) {
+ .u8 => return self.getOrPutInternal(key, header, u8),
+ .u16 => return self.getOrPutInternal(key, header, u16),
+ .u32 => return self.getOrPutInternal(key, header, u32),
+ .usize => return self.getOrPutInternal(key, header, usize),
+ }
+ }
+
+ pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !*Entry {
+ const res = try self.getOrPut(allocator, key);
+ if (!res.found_existing)
+ res.entry.value = value;
+
+ return res.entry;
+ }
+
+ /// Increases capacity, guaranteeing that insertions up until the
+ /// `expected_count` will not cause an allocation, and therefore cannot fail.
+ pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ try self.entries.ensureCapacity(allocator, new_capacity);
+ if (new_capacity <= linear_scan_max) return;
+
+ // Ensure that the indexes will be at most 60% full if
+ // `new_capacity` items are put into it.
+ const needed_len = new_capacity * 5 / 3;
+ if (self.index_header) |header| {
+ if (needed_len > header.indexes_len) {
+ // An overflow here would mean the amount of memory required would not
+ // be representable in the address space.
+ const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
+ const new_header = try IndexHeader.alloc(allocator, new_indexes_len);
+ self.insertAllEntriesIntoNewHeader(new_header);
+ header.free(allocator);
+ self.index_header = new_header;
+ }
+ } else {
+ // An overflow here would mean the amount of memory required would not
+ // be representable in the address space.
+ const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
+ const header = try IndexHeader.alloc(allocator, new_indexes_len);
+ self.insertAllEntriesIntoNewHeader(header);
+ self.index_header = header;
}
}
- /// Returns the kv pair that was already there.
- pub fn put(self: *Self, key: K, value: V) !?KV {
- try self.autoCapacity();
- return putAssumeCapacity(self, key, value);
+ /// Returns the number of total elements which may be present before it is
+ /// no longer guaranteed that no allocations will be performed.
+ pub fn capacity(self: Self) usize {
+ const entry_cap = self.entries.capacity;
+ const header = self.index_header orelse return math.min(linear_scan_max, entry_cap);
+ const indexes_cap = (header.indexes_len + 1) * 3 / 4;
+ return math.min(entry_cap, indexes_cap);
}
- /// Calls put() and asserts that no kv pair is clobbered.
- pub fn putNoClobber(self: *Self, key: K, value: V) !void {
- assert((try self.put(key, value)) == null);
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPut`.
+ pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ const result = try self.getOrPut(allocator, key);
+ result.entry.value = value;
}
- pub fn putAssumeCapacity(self: *Self, key: K, value: V) ?KV {
- assert(self.count() < self.entries.len);
- self.incrementModificationCount();
+ /// Inserts a key-value pair into the hash map, asserting that no previous
+ /// entry with the same key is already present
+ pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ const result = try self.getOrPut(allocator, key);
+ assert(!result.found_existing);
+ result.entry.value = value;
+ }
- const put_result = self.internalPut(key);
- put_result.new_entry.kv.value = value;
- return put_result.old_kv;
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
+ const result = self.getOrPutAssumeCapacity(key);
+ result.entry.value = value;
}
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Asserts that it does not clobber any existing data.
+ /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
- assert(self.putAssumeCapacity(key, value) == null);
+ const result = self.getOrPutAssumeCapacity(key);
+ assert(!result.found_existing);
+ result.entry.value = value;
+ }
+
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?Entry {
+ const gop = try self.getOrPut(allocator, key);
+ var result: ?Entry = null;
+ if (gop.found_existing) {
+ result = gop.entry.*;
+ }
+ gop.entry.value = value;
+ return result;
}
- pub fn get(hm: *const Self, key: K) ?*KV {
- if (hm.entries.len == 0) {
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ /// If insertion happens, asserts there is enough capacity without allocating.
+ pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
+ const gop = self.getOrPutAssumeCapacity(key);
+ var result: ?Entry = null;
+ if (gop.found_existing) {
+ result = gop.entry.*;
+ }
+ gop.entry.value = value;
+ return result;
+ }
+
+ pub fn getEntry(self: Self, key: K) ?*Entry {
+ const header = self.index_header orelse {
+ // Linear scan.
+ const h = if (store_hash) hash(key) else {};
+ for (self.entries.items) |*item| {
+ if (item.hash == h and eql(key, item.key)) {
+ return item;
+ }
+ }
return null;
+ };
+
+ switch (header.capacityIndexType()) {
+ .u8 => return self.getInternal(key, header, u8),
+ .u16 => return self.getInternal(key, header, u16),
+ .u32 => return self.getInternal(key, header, u32),
+ .usize => return self.getInternal(key, header, usize),
}
- return hm.internalGet(key);
}
- pub fn getValue(hm: *const Self, key: K) ?V {
- return if (hm.get(key)) |kv| kv.value else null;
+ pub fn get(self: Self, key: K) ?V {
+ return if (self.getEntry(key)) |entry| entry.value else null;
}
- pub fn contains(hm: *const Self, key: K) bool {
- return hm.get(key) != null;
+ pub fn contains(self: Self, key: K) bool {
+ return self.getEntry(key) != null;
}
- /// Returns any kv pair that was removed.
- pub fn remove(hm: *Self, key: K) ?KV {
- if (hm.entries.len == 0) return null;
- hm.incrementModificationCount();
- const start_index = hm.keyToIndex(key);
- {
- var roll_over: usize = 0;
- while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
- const index = hm.constrainIndex(start_index + roll_over);
- var entry = &hm.entries[index];
-
- if (!entry.used) return null;
-
- if (!eql(entry.kv.key, key)) continue;
-
- const removed_kv = entry.kv;
- while (roll_over < hm.entries.len) : (roll_over += 1) {
- const next_index = hm.constrainIndex(start_index + roll_over + 1);
- const next_entry = &hm.entries[next_index];
- if (!next_entry.used or next_entry.distance_from_start_index == 0) {
- entry.used = false;
- hm.size -= 1;
- return removed_kv;
- }
- entry.* = next_entry.*;
- entry.distance_from_start_index -= 1;
- entry = next_entry;
+ /// If there is an `Entry` with a matching key, it is deleted from
+ /// the hash map, and then returned from this function.
+ pub fn remove(self: *Self, key: K) ?Entry {
+ const header = self.index_header orelse {
+ // Linear scan.
+ const h = if (store_hash) hash(key) else {};
+ for (self.entries.items) |item, i| {
+ if (item.hash == h and eql(key, item.key)) {
+ return self.entries.swapRemove(i);
}
- unreachable; // shifting everything in the table
}
+ return null;
+ };
+ switch (header.capacityIndexType()) {
+ .u8 => return self.removeInternal(key, header, u8),
+ .u16 => return self.removeInternal(key, header, u16),
+ .u32 => return self.removeInternal(key, header, u32),
+ .usize => return self.removeInternal(key, header, usize),
}
- return null;
}
- /// Calls remove(), asserts that a kv pair is removed, and discards it.
- pub fn removeAssertDiscard(hm: *Self, key: K) void {
- assert(hm.remove(key) != null);
+ /// Asserts there is an `Entry` with matching key, deletes it from the hash map,
+ /// and discards it.
+ pub fn removeAssertDiscard(self: *Self, key: K) void {
+ assert(self.remove(key) != null);
}
- pub fn iterator(hm: *const Self) Iterator {
- return Iterator{
- .hm = hm,
- .count = 0,
- .index = 0,
- .initial_modification_count = hm.modification_count,
- };
+ pub fn items(self: Self) []Entry {
+ return self.entries.items;
}
- pub fn clone(self: Self) !Self {
- var other = Self.init(self.allocator);
- try other.initCapacity(self.entries.len);
- var it = self.iterator();
- while (it.next()) |entry| {
- try other.putNoClobber(entry.key, entry.value);
+ pub fn clone(self: Self, allocator: *Allocator) !Self {
+ var other: Self = .{};
+ try other.entries.appendSlice(allocator, self.entries.items);
+
+ if (self.index_header) |header| {
+ const new_header = try IndexHeader.alloc(allocator, header.indexes_len);
+ other.insertAllEntriesIntoNewHeader(new_header);
+ other.index_header = new_header;
}
return other;
}
- fn autoCapacity(self: *Self) !void {
- if (self.entries.len == 0) {
- return self.ensureCapacityExact(16);
- }
- // if we get too full (60%), double the capacity
- if (self.size * 5 >= self.entries.len * 3) {
- return self.ensureCapacityExact(self.entries.len * 2);
- }
- }
+ fn removeInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) ?Entry {
+ const indexes = header.indexes(I);
+ const h = hash(key);
+ const start_index = header.constrainIndex(h);
+ var roll_over: usize = 0;
+ while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) {
+ const index_index = header.constrainIndex(start_index + roll_over);
+ var index = &indexes[index_index];
+ if (index.isEmpty())
+ return null;
- fn initCapacity(hm: *Self, capacity: usize) !void {
- hm.entries = try hm.allocator.alloc(Entry, capacity);
- hm.size = 0;
- hm.max_distance_from_start_index = 0;
- for (hm.entries) |*entry| {
- entry.used = false;
+ const entry = &self.entries.items[index.entry_index];
+
+ const hash_match = if (store_hash) h == entry.hash else true;
+ if (!hash_match or !eql(key, entry.key))
+ continue;
+
+ const removed_entry = self.entries.swapRemove(index.entry_index);
+ if (self.entries.items.len > 0 and self.entries.items.len != index.entry_index) {
+ // Because of the swap remove, now we need to update the index that was
+ // pointing to the last entry and is now pointing to this removed item slot.
+ self.updateEntryIndex(header, self.entries.items.len, index.entry_index, I, indexes);
+ }
+
+ // Now we have to shift over the following indexes.
+ roll_over += 1;
+ while (roll_over < header.indexes_len) : (roll_over += 1) {
+ const next_index_index = header.constrainIndex(start_index + roll_over);
+ const next_index = &indexes[next_index_index];
+ if (next_index.isEmpty() or next_index.distance_from_start_index == 0) {
+ index.setEmpty();
+ return removed_entry;
+ }
+ index.* = next_index.*;
+ index.distance_from_start_index -= 1;
+ index = next_index;
+ }
+ unreachable;
}
+ return null;
}
- fn incrementModificationCount(hm: *Self) void {
- if (want_modification_safety) {
- hm.modification_count +%= 1;
+ fn updateEntryIndex(
+ self: *Self,
+ header: *IndexHeader,
+ old_entry_index: usize,
+ new_entry_index: usize,
+ comptime I: type,
+ indexes: []Index(I),
+ ) void {
+ const h = if (store_hash) self.entries.items[new_entry_index].hash else hash(self.entries.items[new_entry_index].key);
+ const start_index = header.constrainIndex(h);
+ var roll_over: usize = 0;
+ while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) {
+ const index_index = header.constrainIndex(start_index + roll_over);
+ const index = &indexes[index_index];
+ if (index.entry_index == old_entry_index) {
+ index.entry_index = @intCast(I, new_entry_index);
+ return;
+ }
}
+ unreachable;
}
- const InternalPutResult = struct {
- new_entry: *Entry,
- old_kv: ?KV,
- };
-
- /// Returns a pointer to the new entry.
- /// Asserts that there is enough space for the new item.
- fn internalPut(self: *Self, orig_key: K) InternalPutResult {
- var key = orig_key;
- var value: V = undefined;
- const start_index = self.keyToIndex(key);
+ /// Must ensureCapacity before calling this.
+ fn getOrPutInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) GetOrPutResult {
+ const indexes = header.indexes(I);
+ const h = hash(key);
+ const start_index = header.constrainIndex(h);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
- var got_result_entry = false;
- var result = InternalPutResult{
- .new_entry = undefined,
- .old_kv = null,
- };
- while (roll_over < self.entries.len) : ({
+ while (roll_over <= header.indexes_len) : ({
roll_over += 1;
distance_from_start_index += 1;
}) {
- const index = self.constrainIndex(start_index + roll_over);
- const entry = &self.entries[index];
-
- if (entry.used and !eql(entry.kv.key, key)) {
- if (entry.distance_from_start_index < distance_from_start_index) {
- // robin hood to the rescue
- const tmp = entry.*;
- self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index);
- if (!got_result_entry) {
- got_result_entry = true;
- result.new_entry = entry;
+ const index_index = header.constrainIndex(start_index + roll_over);
+ const index = indexes[index_index];
+ if (index.isEmpty()) {
+ indexes[index_index] = .{
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ .entry_index = @intCast(I, self.entries.items.len),
+ };
+ header.maybeBumpMax(distance_from_start_index);
+ const new_entry = self.entries.addOneAssumeCapacity();
+ new_entry.* = .{
+ .hash = if (store_hash) h else {},
+ .key = key,
+ .value = undefined,
+ };
+ return .{
+ .found_existing = false,
+ .entry = new_entry,
+ };
+ }
+
+ // This pointer survives the following append because we call
+ // entries.ensureCapacity before getOrPutInternal.
+ const entry = &self.entries.items[index.entry_index];
+ const hash_match = if (store_hash) h == entry.hash else true;
+ if (hash_match and eql(key, entry.key)) {
+ return .{
+ .found_existing = true,
+ .entry = entry,
+ };
+ }
+ if (index.distance_from_start_index < distance_from_start_index) {
+ // In this case, we did not find the item. We will put a new entry.
+ // However, we will use this index for the new entry, and move
+ // the previous index down the line, to keep the max_distance_from_start_index
+ // as small as possible.
+ indexes[index_index] = .{
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ .entry_index = @intCast(I, self.entries.items.len),
+ };
+ header.maybeBumpMax(distance_from_start_index);
+ const new_entry = self.entries.addOneAssumeCapacity();
+ new_entry.* = .{
+ .hash = if (store_hash) h else {},
+ .key = key,
+ .value = undefined,
+ };
+
+ distance_from_start_index = index.distance_from_start_index;
+ var prev_entry_index = index.entry_index;
+
+ // Find somewhere to put the index we replaced by shifting
+ // following indexes backwards.
+ roll_over += 1;
+ distance_from_start_index += 1;
+ while (roll_over < header.indexes_len) : ({
+ roll_over += 1;
+ distance_from_start_index += 1;
+ }) {
+ const next_index_index = header.constrainIndex(start_index + roll_over);
+ const next_index = indexes[next_index_index];
+ if (next_index.isEmpty()) {
+ header.maybeBumpMax(distance_from_start_index);
+ indexes[next_index_index] = .{
+ .entry_index = prev_entry_index,
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ };
+ return .{
+ .found_existing = false,
+ .entry = new_entry,
+ };
+ }
+ if (next_index.distance_from_start_index < distance_from_start_index) {
+ header.maybeBumpMax(distance_from_start_index);
+ indexes[next_index_index] = .{
+ .entry_index = prev_entry_index,
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ };
+ distance_from_start_index = next_index.distance_from_start_index;
+ prev_entry_index = next_index.entry_index;
}
- entry.* = Entry{
- .used = true,
- .distance_from_start_index = distance_from_start_index,
- .kv = KV{
- .key = key,
- .value = value,
- },
- };
- key = tmp.kv.key;
- value = tmp.kv.value;
- distance_from_start_index = tmp.distance_from_start_index;
}
- continue;
+ unreachable;
}
+ }
+ unreachable;
+ }
- if (entry.used) {
- result.old_kv = entry.kv;
- } else {
- // adding an entry. otherwise overwriting old value with
- // same key
- self.size += 1;
- }
+ fn getInternal(self: Self, key: K, header: *IndexHeader, comptime I: type) ?*Entry {
+ const indexes = header.indexes(I);
+ const h = hash(key);
+ const start_index = header.constrainIndex(h);
+ var roll_over: usize = 0;
+ while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) {
+ const index_index = header.constrainIndex(start_index + roll_over);
+ const index = indexes[index_index];
+ if (index.isEmpty())
+ return null;
+
+ const entry = &self.entries.items[index.entry_index];
+ const hash_match = if (store_hash) h == entry.hash else true;
+ if (hash_match and eql(key, entry.key))
+ return entry;
+ }
+ return null;
+ }
- self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index);
- if (!got_result_entry) {
- result.new_entry = entry;
- }
- entry.* = Entry{
- .used = true,
- .distance_from_start_index = distance_from_start_index,
- .kv = KV{
- .key = key,
- .value = value,
- },
- };
- return result;
+ fn insertAllEntriesIntoNewHeader(self: *Self, header: *IndexHeader) void {
+ switch (header.capacityIndexType()) {
+ .u8 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u8),
+ .u16 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u16),
+ .u32 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u32),
+ .usize => return self.insertAllEntriesIntoNewHeaderGeneric(header, usize),
}
- unreachable; // put into a full map
}
- fn internalGet(hm: Self, key: K) ?*KV {
- const start_index = hm.keyToIndex(key);
- {
+ fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, header: *IndexHeader, comptime I: type) void {
+ const indexes = header.indexes(I);
+ entry_loop: for (self.entries.items) |entry, i| {
+ const h = if (store_hash) entry.hash else hash(entry.key);
+ const start_index = header.constrainIndex(h);
+ var entry_index = i;
var roll_over: usize = 0;
- while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
- const index = hm.constrainIndex(start_index + roll_over);
- const entry = &hm.entries[index];
-
- if (!entry.used) return null;
- if (eql(entry.kv.key, key)) return &entry.kv;
+ var distance_from_start_index: usize = 0;
+ while (roll_over < header.indexes_len) : ({
+ roll_over += 1;
+ distance_from_start_index += 1;
+ }) {
+ const index_index = header.constrainIndex(start_index + roll_over);
+ const next_index = indexes[index_index];
+ if (next_index.isEmpty()) {
+ header.maybeBumpMax(distance_from_start_index);
+ indexes[index_index] = .{
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ .entry_index = @intCast(I, entry_index),
+ };
+ continue :entry_loop;
+ }
+ if (next_index.distance_from_start_index < distance_from_start_index) {
+ header.maybeBumpMax(distance_from_start_index);
+ indexes[index_index] = .{
+ .distance_from_start_index = @intCast(I, distance_from_start_index),
+ .entry_index = @intCast(I, entry_index),
+ };
+ distance_from_start_index = next_index.distance_from_start_index;
+ entry_index = next_index.entry_index;
+ }
}
+ unreachable;
}
- return null;
}
+ };
+}
+
+const CapacityIndexType = enum { u8, u16, u32, usize };
+
+fn capacityIndexType(indexes_len: usize) CapacityIndexType {
+ if (indexes_len < math.maxInt(u8))
+ return .u8;
+ if (indexes_len < math.maxInt(u16))
+ return .u16;
+ if (indexes_len < math.maxInt(u32))
+ return .u32;
+ return .usize;
+}
+
+fn capacityIndexSize(indexes_len: usize) usize {
+ switch (capacityIndexType(indexes_len)) {
+ .u8 => return @sizeOf(Index(u8)),
+ .u16 => return @sizeOf(Index(u16)),
+ .u32 => return @sizeOf(Index(u32)),
+ .usize => return @sizeOf(Index(usize)),
+ }
+}
+
+fn Index(comptime I: type) type {
+ return extern struct {
+ entry_index: I,
+ distance_from_start_index: I,
+
+ const Self = @This();
+
+ const empty = Self{
+ .entry_index = math.maxInt(I),
+ .distance_from_start_index = undefined,
+ };
- fn keyToIndex(hm: Self, key: K) usize {
- return hm.constrainIndex(@as(usize, hash(key)));
+ fn isEmpty(idx: Self) bool {
+ return idx.entry_index == math.maxInt(I);
}
- fn constrainIndex(hm: Self, i: usize) usize {
- // this is an optimization for modulo of power of two integers;
- // it requires hm.entries.len to always be a power of two
- return i & (hm.entries.len - 1);
+ fn setEmpty(idx: *Self) void {
+ idx.entry_index = math.maxInt(I);
}
};
}
+/// This struct is trailed by an array of `Index(I)`, where `I`
+/// and the array length are determined by `indexes_len`.
+const IndexHeader = struct {
+ max_distance_from_start_index: usize,
+ indexes_len: usize,
+
+ fn constrainIndex(header: IndexHeader, i: usize) usize {
+ // This is an optimization for modulo of power of two integers;
+ // it requires `indexes_len` to always be a power of two.
+ return i & (header.indexes_len - 1);
+ }
+
+ fn indexes(header: *IndexHeader, comptime I: type) []Index(I) {
+ const start = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader));
+ return start[0..header.indexes_len];
+ }
+
+ fn capacityIndexType(header: IndexHeader) CapacityIndexType {
+ return hash_map.capacityIndexType(header.indexes_len);
+ }
+
+ fn maybeBumpMax(header: *IndexHeader, distance_from_start_index: usize) void {
+ if (distance_from_start_index > header.max_distance_from_start_index) {
+ header.max_distance_from_start_index = distance_from_start_index;
+ }
+ }
+
+ fn alloc(allocator: *Allocator, len: usize) !*IndexHeader {
+ const index_size = hash_map.capacityIndexSize(len);
+ const nbytes = @sizeOf(IndexHeader) + index_size * len;
+ const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact);
+ @memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
+ const result = @ptrCast(*IndexHeader, bytes.ptr);
+ result.* = .{
+ .max_distance_from_start_index = 0,
+ .indexes_len = len,
+ };
+ return result;
+ }
+
+ fn free(header: *IndexHeader, allocator: *Allocator) void {
+ const index_size = hash_map.capacityIndexSize(header.indexes_len);
+ const ptr = @ptrCast([*]u8, header);
+ const slice = ptr[0 .. @sizeOf(IndexHeader) + header.indexes_len * index_size];
+ allocator.free(slice);
+ }
+};
+
test "basic hash map usage" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
- testing.expect((try map.put(1, 11)) == null);
- testing.expect((try map.put(2, 22)) == null);
- testing.expect((try map.put(3, 33)) == null);
- testing.expect((try map.put(4, 44)) == null);
+ testing.expect((try map.fetchPut(1, 11)) == null);
+ testing.expect((try map.fetchPut(2, 22)) == null);
+ testing.expect((try map.fetchPut(3, 33)) == null);
+ testing.expect((try map.fetchPut(4, 44)) == null);
try map.putNoClobber(5, 55);
- testing.expect((try map.put(5, 66)).?.value == 55);
- testing.expect((try map.put(5, 55)).?.value == 66);
+ testing.expect((try map.fetchPut(5, 66)).?.value == 55);
+ testing.expect((try map.fetchPut(5, 55)).?.value == 66);
const gop1 = try map.getOrPut(5);
testing.expect(gop1.found_existing == true);
- testing.expect(gop1.kv.value == 55);
- gop1.kv.value = 77;
- testing.expect(map.get(5).?.value == 77);
+ testing.expect(gop1.entry.value == 55);
+ gop1.entry.value = 77;
+ testing.expect(map.getEntry(5).?.value == 77);
const gop2 = try map.getOrPut(99);
testing.expect(gop2.found_existing == false);
- gop2.kv.value = 42;
- testing.expect(map.get(99).?.value == 42);
+ gop2.entry.value = 42;
+ testing.expect(map.getEntry(99).?.value == 42);
const gop3 = try map.getOrPutValue(5, 5);
testing.expect(gop3.value == 77);
@@ -454,15 +898,15 @@ test "basic hash map usage" {
testing.expect(gop4.value == 41);
testing.expect(map.contains(2));
- testing.expect(map.get(2).?.value == 22);
- testing.expect(map.getValue(2).? == 22);
+ testing.expect(map.getEntry(2).?.value == 22);
+ testing.expect(map.get(2).? == 22);
const rmv1 = map.remove(2);
testing.expect(rmv1.?.key == 2);
testing.expect(rmv1.?.value == 22);
testing.expect(map.remove(2) == null);
+ testing.expect(map.getEntry(2) == null);
testing.expect(map.get(2) == null);
- testing.expect(map.getValue(2) == null);
map.removeAssertDiscard(3);
}
@@ -498,8 +942,8 @@ test "iterator hash map" {
it.reset();
var count: usize = 0;
- while (it.next()) |kv| : (count += 1) {
- buffer[@intCast(usize, kv.key)] = kv.value;
+ while (it.next()) |entry| : (count += 1) {
+ buffer[@intCast(usize, entry.key)] = entry.value;
}
testing.expect(count == 3);
testing.expect(it.next() == null);
@@ -510,8 +954,8 @@ test "iterator hash map" {
it.reset();
count = 0;
- while (it.next()) |kv| {
- buffer[@intCast(usize, kv.key)] = kv.value;
+ while (it.next()) |entry| {
+ buffer[@intCast(usize, entry.key)] = entry.value;
count += 1;
if (count >= 2) break;
}
@@ -531,14 +975,33 @@ test "ensure capacity" {
defer map.deinit();
try map.ensureCapacity(20);
- const initialCapacity = map.entries.len;
- testing.expect(initialCapacity >= 20);
+ const initial_capacity = map.capacity();
+ testing.expect(initial_capacity >= 20);
var i: i32 = 0;
while (i < 20) : (i += 1) {
- testing.expect(map.putAssumeCapacity(i, i + 10) == null);
+ testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null);
}
// shouldn't resize from putAssumeCapacity
- testing.expect(initialCapacity == map.entries.len);
+ testing.expect(initial_capacity == map.capacity());
+}
+
+test "clone" {
+ var original = AutoHashMap(i32, i32).init(std.testing.allocator);
+ defer original.deinit();
+
+ // put more than `linear_scan_max` so we can test that the index header is properly cloned
+ var i: u8 = 0;
+ while (i < 10) : (i += 1) {
+ try original.putNoClobber(i, i * 10);
+ }
+
+ var copy = try original.clone();
+ defer copy.deinit();
+
+ i = 0;
+ while (i < 10) : (i += 1) {
+ testing.expect(copy.get(i).? == i * 10);
+ }
}
pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) {
@@ -575,6 +1038,24 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
}.eql;
}
+pub fn autoEqlIsCheap(comptime K: type) bool {
+ return switch (@typeInfo(K)) {
+ .Bool,
+ .Int,
+ .Float,
+ .Pointer,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Enum,
+ .Fn,
+ .ErrorSet,
+ .AnyFrame,
+ .EnumLiteral,
+ => true,
+ else => false,
+ };
+}
+
pub fn getAutoHashStratFn(comptime K: type, comptime strategy: std.hash.Strategy) (fn (K) u32) {
return struct {
fn hash(key: K) u32 {
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index f05378c215..a8ab729413 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -15,23 +15,59 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
const Allocator = mem.Allocator;
+usingnamespace if (comptime @hasDecl(c, "malloc_size"))
+ struct {
+ pub const supports_malloc_size = true;
+ pub const malloc_size = c.malloc_size;
+ }
+else if (comptime @hasDecl(c, "malloc_usable_size"))
+ struct {
+ pub const supports_malloc_size = true;
+ pub const malloc_size = c.malloc_usable_size;
+ }
+else
+ struct {
+ pub const supports_malloc_size = false;
+ };
+
pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
- .reallocFn = cRealloc,
- .shrinkFn = cShrink,
+ .allocFn = cAlloc,
+ .resizeFn = cResize,
};
-fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- assert(new_align <= @alignOf(c_longdouble));
- const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
- const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
- return @ptrCast([*]u8, buf)[0..new_size];
+fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+ assert(ptr_align <= @alignOf(c_longdouble));
+ const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
+ if (len_align == 0) {
+ return ptr[0..len];
+ }
+ const full_len = init: {
+ if (supports_malloc_size) {
+ const s = malloc_size(ptr);
+ assert(s >= len);
+ break :init s;
+ }
+ break :init len;
+ };
+ return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}
-fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- const old_ptr = @ptrCast(*c_void, old_mem.ptr);
- const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
- return @ptrCast([*]u8, buf)[0..new_size];
+fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+ if (new_len == 0) {
+ c.free(buf.ptr);
+ return 0;
+ }
+ if (new_len <= buf.len) {
+ return mem.alignAllocLen(buf.len, new_len, len_align);
+ }
+ if (supports_malloc_size) {
+ const full_len = malloc_size(buf.ptr);
+ if (new_len <= full_len) {
+ return mem.alignAllocLen(full_len, new_len, len_align);
+ }
+ }
+ return error.OutOfMemory;
}
/// This allocator makes a syscall directly for every allocation and free.
@@ -44,19 +80,27 @@ else
&page_allocator_state;
var page_allocator_state = Allocator{
- .reallocFn = PageAllocator.realloc,
- .shrinkFn = PageAllocator.shrink,
+ .allocFn = PageAllocator.alloc,
+ .resizeFn = PageAllocator.resize,
};
var wasm_page_allocator_state = Allocator{
- .reallocFn = WasmPageAllocator.realloc,
- .shrinkFn = WasmPageAllocator.shrink,
+ .allocFn = WasmPageAllocator.alloc,
+ .resizeFn = WasmPageAllocator.resize,
};
pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
+/// Verifies that the adjusted length will still map to the full length
+pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
+ const aligned_len = mem.alignAllocLen(full_len, len, len_align);
+ assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
+ return aligned_len;
+}
+
const PageAllocator = struct {
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
- if (n == 0) return &[0]u8{};
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ assert(n > 0);
+ const alignedLen = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
@@ -68,21 +112,21 @@ const PageAllocator = struct {
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = w.VirtualAlloc(
null,
- n,
+ alignedLen,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return error.OutOfMemory;
// If the allocation is sufficiently aligned, use it.
if (@ptrToInt(addr) & (alignment - 1) == 0) {
- return @ptrCast([*]u8, addr)[0..n];
+ return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
}
// If it wasn't, actually do an explicitely aligned allocation.
w.VirtualFree(addr, 0, w.MEM_RELEASE);
- const alloc_size = n + alignment;
+ const alloc_size = n + alignment - mem.page_size;
- const final_addr = while (true) {
+ while (true) {
// Reserve a range of memory large enough to find a sufficiently
// aligned address.
const reserved_addr = w.VirtualAlloc(
@@ -102,48 +146,49 @@ const PageAllocator = struct {
// until it succeeds.
const ptr = w.VirtualAlloc(
@intToPtr(*c_void, aligned_addr),
- n,
+ alignedLen,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch continue;
- return @ptrCast([*]u8, ptr)[0..n];
- };
-
- return @ptrCast([*]u8, final_addr)[0..n];
+ return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+ }
}
- const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
+ const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
+ const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
const slice = os.mmap(
null,
- mem.alignForward(alloc_size, mem.page_size),
+ allocLen,
os.PROT_READ | os.PROT_WRITE,
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
0,
) catch return error.OutOfMemory;
- if (alloc_size == n) return slice[0..n];
+ assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
- const unused_start_len = aligned_addr - @ptrToInt(slice.ptr);
- if (unused_start_len != 0) {
- os.munmap(slice[0..unused_start_len]);
+ const dropLen = aligned_addr - @ptrToInt(slice.ptr);
+ if (dropLen != 0) {
+ os.munmap(slice[0..dropLen]);
}
- const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
- const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
- if (unused_end_len != 0) {
- os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
+
+ // Unmap extra pages
+ const alignedBufferLen = allocLen - dropLen;
+ if (alignedBufferLen > alignedLen) {
+ os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
}
- return @intToPtr([*]u8, aligned_addr)[0..n];
+ return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
}
- fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
+ fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
+ const new_size_aligned = mem.alignForward(new_size, mem.page_size);
+
if (builtin.os.tag == .windows) {
const w = os.windows;
if (new_size == 0) {
@@ -153,100 +198,45 @@ const PageAllocator = struct {
// is reserved in the initial allocation call to VirtualAlloc."
// So we can only use MEM_RELEASE when actually releasing the
// whole allocation.
- w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
- } else {
- const base_addr = @ptrToInt(old_mem.ptr);
- const old_addr_end = base_addr + old_mem.len;
- const new_addr_end = base_addr + new_size;
- const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
- if (old_addr_end > new_addr_end_rounded) {
+ w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
+ return 0;
+ }
+ if (new_size < buf_unaligned.len) {
+ const base_addr = @ptrToInt(buf_unaligned.ptr);
+ const old_addr_end = base_addr + buf_unaligned.len;
+ const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
+ if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only
// decommit the pages not needed anymore.
w.VirtualFree(
- @intToPtr(*c_void, new_addr_end_rounded),
- old_addr_end - new_addr_end_rounded,
+ @intToPtr(*c_void, new_addr_end),
+ old_addr_end - new_addr_end,
w.MEM_DECOMMIT,
);
}
+ return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- return old_mem[0..new_size];
- }
- const base_addr = @ptrToInt(old_mem.ptr);
- const old_addr_end = base_addr + old_mem.len;
- const new_addr_end = base_addr + new_size;
- const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
- if (old_addr_end > new_addr_end_rounded) {
- const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
- os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
- }
- return old_mem[0..new_size];
- }
-
- fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
- if (builtin.os.tag == .windows) {
- if (old_mem.len == 0) {
- return alloc(allocator, new_size, new_align);
- }
-
- if (new_size <= old_mem.len and new_align <= old_align) {
- return shrink(allocator, old_mem, old_align, new_size, new_align);
- }
-
- const w = os.windows;
- const base_addr = @ptrToInt(old_mem.ptr);
-
- if (new_align > old_align and base_addr & (new_align - 1) != 0) {
- // Current allocation doesn't satisfy the new alignment.
- // For now we'll do a new one no matter what, but maybe
- // there is something smarter to do instead.
- const result = try alloc(allocator, new_size, new_align);
- assert(old_mem.len != 0);
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
-
- return result;
- }
-
- const old_addr_end = base_addr + old_mem.len;
- const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
- const new_addr_end = base_addr + new_size;
- const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
- if (new_addr_end_rounded == old_addr_end_rounded) {
- // The reallocation fits in the already allocated pages.
- return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
+ if (new_size == buf_unaligned.len) {
+ return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- assert(new_addr_end_rounded > old_addr_end_rounded);
+ // new_size > buf_unaligned.len not implemented
+ return error.OutOfMemory;
+ }
- // We need to commit new pages.
- const additional_size = new_addr_end - old_addr_end_rounded;
- const realloc_addr = w.kernel32.VirtualAlloc(
- @intToPtr(*c_void, old_addr_end_rounded),
- additional_size,
- w.MEM_COMMIT | w.MEM_RESERVE,
- w.PAGE_READWRITE,
- ) orelse {
- // Committing new pages at the end of the existing allocation
- // failed, we need to try a new one.
- const new_alloc_mem = try alloc(allocator, new_size, new_align);
- @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
- w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
-
- return new_alloc_mem;
- };
+ const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
+ if (new_size_aligned == buf_aligned_len)
+ return alignPageAllocLen(new_size_aligned, new_size, len_align);
- assert(@ptrToInt(realloc_addr) == old_addr_end_rounded);
- return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
- }
- if (new_size <= old_mem.len and new_align <= old_align) {
- return shrink(allocator, old_mem, old_align, new_size, new_align);
+ if (new_size_aligned < buf_aligned_len) {
+ const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
+ os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
+ if (new_size_aligned == 0)
+ return 0;
+ return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- const result = try alloc(allocator, new_size, new_align);
- if (old_mem.len != 0) {
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- os.munmap(old_mem);
- }
- return result;
+
+ // TODO: call mremap
+ return error.OutOfMemory;
}
};
@@ -299,7 +289,7 @@ const WasmPageAllocator = struct {
// Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
const not_found = std.math.maxInt(usize);
- fn useRecycled(self: FreeBlock, num_pages: usize) usize {
+ fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
@@ -312,7 +302,8 @@ const WasmPageAllocator = struct {
var count: usize = 0;
while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
count += 1;
- if (count >= num_pages) {
+ const addr = j * mem.page_size;
+ if (count >= num_pages and mem.isAligned(addr, alignment)) {
self.setBits(j, num_pages, .used);
return j;
}
@@ -338,73 +329,72 @@ const WasmPageAllocator = struct {
}
fn nPages(memsize: usize) usize {
- return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
+ return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
- fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize {
- var idx = conventional.useRecycled(page_count);
- if (idx != FreeBlock.not_found) {
- return idx;
+ fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ const page_count = nPages(len);
+ const page_idx = try allocPages(page_count, alignment);
+ return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
+ }
+ fn allocPages(page_count: usize, alignment: u29) !usize {
+ {
+ const idx = conventional.useRecycled(page_count, alignment);
+ if (idx != FreeBlock.not_found) {
+ return idx;
+ }
}
- idx = extended.useRecycled(page_count);
+ const idx = extended.useRecycled(page_count, alignment);
if (idx != FreeBlock.not_found) {
return idx + extendedOffset();
}
- const prev_page_count = @wasmMemoryGrow(0, @intCast(u32, page_count));
- if (prev_page_count <= 0) {
+ const next_page_idx = @wasmMemorySize(0);
+ const next_page_addr = next_page_idx * mem.page_size;
+ const aligned_addr = mem.alignForward(next_page_addr, alignment);
+ const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
+ const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
+ if (result <= 0)
return error.OutOfMemory;
+ assert(result == next_page_idx);
+ const aligned_page_idx = next_page_idx + drop_page_count;
+ if (drop_page_count > 0) {
+ freePages(next_page_idx, aligned_page_idx);
}
-
- return @intCast(usize, prev_page_count);
+ return @intCast(usize, aligned_page_idx);
}
- pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
- if (new_align > std.mem.page_size) {
- return error.OutOfMemory;
+ fn freePages(start: usize, end: usize) void {
+ if (start < extendedOffset()) {
+ conventional.recycle(start, std.math.min(extendedOffset(), end) - start);
}
-
- if (nPages(new_size) == nPages(old_mem.len)) {
- return old_mem.ptr[0..new_size];
- } else if (new_size < old_mem.len) {
- return shrink(allocator, old_mem, old_align, new_size, new_align);
- } else {
- const page_idx = try alloc(allocator, nPages(new_size), new_align);
- const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size];
- std.mem.copy(u8, new_mem, old_mem);
- _ = shrink(allocator, old_mem, old_align, 0, 0);
- return new_mem;
+ if (end > extendedOffset()) {
+ var new_end = end;
+ if (!extended.isInitialized()) {
+ // Steal the last page from the memory currently being recycled
+ // TODO: would it be better if we use the first page instead?
+ new_end -= 1;
+
+ extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
+ // Since this is the first page being freed and we consume it, assume *nothing* is free.
+ mem.set(u128, extended.data, PageStatus.none_free);
+ }
+ const clamped_start = std.math.max(extendedOffset(), start);
+ extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
}
}
- pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- @setCold(true);
- const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
- var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);
-
- if (free_end > free_start) {
- if (free_start < extendedOffset()) {
- const clamped_end = std.math.min(extendedOffset(), free_end);
- conventional.recycle(free_start, clamped_end - free_start);
- }
-
- if (free_end > extendedOffset()) {
- if (!extended.isInitialized()) {
- // Steal the last page from the memory currently being recycled
- // TODO: would it be better if we use the first page instead?
- free_end -= 1;
-
- extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
- // Since this is the first page being freed and we consume it, assume *nothing* is free.
- std.mem.set(u128, extended.data, PageStatus.none_free);
- }
- const clamped_start = std.math.max(extendedOffset(), free_start);
- extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
- }
+ fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+ const aligned_len = mem.alignForward(buf.len, mem.page_size);
+ if (new_len > aligned_len) return error.OutOfMemory;
+ const current_n = nPages(aligned_len);
+ const new_n = nPages(new_len);
+ if (new_n != current_n) {
+ const base = nPages(@ptrToInt(buf.ptr));
+ freePages(base + new_n, base + current_n);
}
-
- return old_mem[0..new_size];
+ return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
}
};
@@ -418,8 +408,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
pub fn init() HeapAllocator {
return HeapAllocator{
.allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = resize,
},
.heap_handle = null,
};
@@ -431,11 +421,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
+ fn getRecordPtr(buf: []u8) *align(1) usize {
+ return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
- if (n == 0) return &[0]u8{};
- const amt = n + alignment + @sizeOf(usize);
+ const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
@@ -446,66 +439,60 @@ pub const HeapAllocator = switch (builtin.os.tag) {
};
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
- const adjusted_addr = mem.alignForward(root_addr, alignment);
- const record_addr = adjusted_addr + n;
- @intToPtr(*align(1) usize, record_addr).* = root_addr;
- return @intToPtr([*]u8, adjusted_addr)[0..n];
- }
-
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
- const old_adjusted_addr = @ptrToInt(old_mem.ptr);
- const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
- const old_ptr = @intToPtr(*c_void, root_addr);
- const new_record_addr = old_record_addr - new_size + old_mem.len;
- @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
- return old_mem[0..new_size];
+ const aligned_addr = mem.alignForward(root_addr, ptr_align);
+ const return_len = init: {
+ if (len_align == 0) break :init n;
+ const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
+ assert(full_len != std.math.maxInt(usize));
+ assert(full_len >= amt);
+ break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr), len_align);
};
+ const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
+ getRecordPtr(buf).* = root_addr;
+ return buf;
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- if (old_mem.len == 0) return alloc(allocator, new_size, new_align);
-
+ fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
- const old_adjusted_addr = @ptrToInt(old_mem.ptr);
- const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
- const old_ptr = @intToPtr(*c_void, root_addr);
-
if (new_size == 0) {
- os.windows.HeapFree(self.heap_handle.?, 0, old_ptr);
- return old_mem[0..0];
+ os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
+ return 0;
}
- const amt = new_size + new_align + @sizeOf(usize);
+ const root_addr = getRecordPtr(buf).*;
+ const align_offset = @ptrToInt(buf.ptr) - root_addr;
+ const amt = align_offset + new_size + @sizeOf(usize);
const new_ptr = os.windows.kernel32.HeapReAlloc(
self.heap_handle.?,
- 0,
- old_ptr,
+ os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
+ @intToPtr(*c_void, root_addr),
amt,
) orelse return error.OutOfMemory;
- const offset = old_adjusted_addr - root_addr;
- const new_root_addr = @ptrToInt(new_ptr);
- var new_adjusted_addr = new_root_addr + offset;
- const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
- const offset_is_aligned = new_adjusted_addr % new_align == 0;
- if (!offset_is_valid or !offset_is_aligned) {
- // If HeapReAlloc didn't happen to move the memory to the new alignment,
- // or the memory starting at the old offset would be outside of the new allocation,
- // then we need to copy the memory to a valid aligned address and use that
- const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
- @memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
- new_adjusted_addr = new_aligned_addr;
- }
- const new_record_addr = new_adjusted_addr + new_size;
- @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
- return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
+ assert(new_ptr == @intToPtr(*c_void, root_addr));
+ const return_len = init: {
+ if (len_align == 0) break :init new_size;
+ const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
+ assert(full_len != std.math.maxInt(usize));
+ assert(full_len >= amt);
+ break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
+ };
+ getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
+ return return_len;
}
},
else => @compileError("Unsupported OS"),
};
+fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
+ return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
+ @ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
+}
+
+fn sliceContainsSlice(container: []u8, slice: []u8) bool {
+ return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
+ (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
+}
+
pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
@@ -514,19 +501,33 @@ pub const FixedBufferAllocator = struct {
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
.allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = resize,
},
.buffer = buffer,
.end_index = 0,
};
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
+ return sliceContainsPtr(self.buffer, ptr);
+ }
+
+ pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
+ return sliceContainsSlice(self.buffer, slice);
+ }
+
+ /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index
+ /// then we won't be able to determine what the last allocation was. This is because
+ /// the alignForward operation done in alloc is not reverisible.
+ pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
+ return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
- const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
- const adjusted_addr = mem.alignForward(addr, alignment);
- const adjusted_index = self.end_index + (adjusted_addr - addr);
+ const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
+ const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
@@ -537,30 +538,28 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
- assert(old_mem.len <= self.end_index);
- if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
- mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
- {
- const start_index = self.end_index - old_mem.len;
- const new_end_index = start_index + new_size;
- if (new_end_index > self.buffer.len) return error.OutOfMemory;
- const result = self.buffer[start_index..new_end_index];
- self.end_index = new_end_index;
- return result;
- } else if (new_size <= old_mem.len and new_align <= old_align) {
- // We can't do anything with the memory, so tell the client to keep it.
- return error.OutOfMemory;
- } else {
- const result = try alloc(allocator, new_size, new_align);
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- return result;
+ assert(self.ownsSlice(buf)); // sanity check
+
+ if (!self.isLastAllocation(buf)) {
+ if (new_size > buf.len)
+ return error.OutOfMemory;
+ return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align);
+ }
+
+ if (new_size <= buf.len) {
+ const sub = buf.len - new_size;
+ self.end_index -= sub;
+ return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
}
- }
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return old_mem[0..new_size];
+ const add = new_size - buf.len;
+ if (add + self.end_index > self.buffer.len) {
+ return error.OutOfMemory;
+ }
+ self.end_index += add;
+ return new_size;
}
pub fn reset(self: *FixedBufferAllocator) void {
@@ -581,20 +580,20 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
return ThreadSafeFixedBufferAllocator{
.allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = Allocator.noResize,
},
.buffer = buffer,
.end_index = 0,
};
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
const addr = @ptrToInt(self.buffer.ptr) + end_index;
- const adjusted_addr = mem.alignForward(addr, alignment);
+ const adjusted_addr = mem.alignForward(addr, ptr_align);
const adjusted_index = end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
@@ -604,21 +603,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
}
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- if (new_size <= old_mem.len and new_align <= old_align) {
- // We can't do anything useful with the memory, tell the client to keep it.
- return error.OutOfMemory;
- } else {
- const result = try alloc(allocator, new_size, new_align);
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- return result;
- }
- }
-
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return old_mem[0..new_size];
- }
-
pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
self.end_index = 0;
}
@@ -632,8 +616,8 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
.allocator = Allocator{
- .reallocFn = StackFallbackAllocator(size).realloc,
- .shrinkFn = StackFallbackAllocator(size).shrink,
+ .allocFn = StackFallbackAllocator(size).realloc,
+ .resizeFn = StackFallbackAllocator(size).resize,
},
};
}
@@ -652,58 +636,19 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
return &self.allocator;
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
- const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
- @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
- if (in_buffer) {
- return FixedBufferAllocator.realloc(
- &self.fixed_buffer_allocator.allocator,
- old_mem,
- old_align,
- new_size,
- new_align,
- ) catch {
- const result = try self.fallback_allocator.reallocFn(
- self.fallback_allocator,
- &[0]u8{},
- undefined,
- new_size,
- new_align,
- );
- mem.copy(u8, result, old_mem);
- return result;
- };
- }
- return self.fallback_allocator.reallocFn(
- self.fallback_allocator,
- old_mem,
- old_align,
- new_size,
- new_align,
- );
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
+ return fallback_allocator.alloc(len, ptr_align);
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
const self = @fieldParentPtr(Self, "allocator", allocator);
- const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
- @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
- if (in_buffer) {
- return FixedBufferAllocator.shrink(
- &self.fixed_buffer_allocator.allocator,
- old_mem,
- old_align,
- new_size,
- new_align,
- );
+ if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
+ try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
+ } else {
+ try self.fallback_allocator.callResizeFn(buf, new_len);
}
- return self.fallback_allocator.shrinkFn(
- self.fallback_allocator,
- old_mem,
- old_align,
- new_size,
- new_align,
- );
}
};
}
@@ -718,8 +663,8 @@ test "c_allocator" {
test "WasmPageAllocator internals" {
if (comptime std.Target.current.isWasm()) {
- const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size;
- const initial = try page_allocator.alloc(u8, std.mem.page_size);
+ const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
+ const initial = try page_allocator.alloc(u8, mem.page_size);
std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
var inplace = try page_allocator.realloc(initial, 1);
@@ -772,6 +717,11 @@ test "PageAllocator" {
slice[127] = 0x34;
allocator.free(slice);
}
+ {
+ var buf = try allocator.alloc(u8, mem.page_size + 1);
+ defer allocator.free(buf);
+ buf = try allocator.realloc(buf, 1); // shrink past the page boundary
+ }
}
test "HeapAllocator" {
@@ -799,7 +749,7 @@ test "ArenaAllocator" {
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
- var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+ var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
@@ -865,7 +815,10 @@ test "ThreadSafeFixedBufferAllocator" {
try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}
-fn testAllocator(allocator: *mem.Allocator) !void {
+pub fn testAllocator(base_allocator: *mem.Allocator) !void {
+ var validationAllocator = mem.validationWrap(base_allocator);
+ const allocator = &validationAllocator.allocator;
+
var slice = try allocator.alloc(*i32, 100);
testing.expect(slice.len == 100);
for (slice) |*item, i| {
@@ -893,7 +846,10 @@ fn testAllocator(allocator: *mem.Allocator) !void {
allocator.free(slice);
}
-fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
+pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
+ var validationAllocator = mem.validationWrap(base_allocator);
+ const allocator = &validationAllocator.allocator;
+
// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);
testing.expect(slice.len == 10);
@@ -917,7 +873,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
testing.expect(slice.len == 0);
}
-fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
+pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
+ var validationAllocator = mem.validationWrap(base_allocator);
+ const allocator = &validationAllocator.allocator;
+
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
@@ -946,7 +905,10 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
allocator.free(slice);
}
-fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
+pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
+ var validationAllocator = mem.validationWrap(base_allocator);
+ const allocator = &validationAllocator.allocator;
+
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index b41399772a..a5d8aaea45 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -20,8 +20,8 @@ pub const ArenaAllocator = struct {
pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
return .{
.allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = Allocator.noResize,
},
.child_allocator = child_allocator,
.state = self,
@@ -49,9 +49,8 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
- const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
- const buf_node = &buf_node_slice[0];
+ const buf = try self.child_allocator.callAllocFn(len, @alignOf(BufNode), 1);
+ const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
.next = null,
@@ -61,18 +60,18 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
- var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
+ var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
- const adjusted_addr = mem.alignForward(addr, alignment);
+ const adjusted_addr = mem.alignForward(addr, ptr_align);
const adjusted_index = self.state.end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n;
if (new_end_index > cur_buf.len) {
- cur_node = try self.createNode(cur_buf.len, n + alignment);
+ cur_node = try self.createNode(cur_buf.len, n + ptr_align);
continue;
}
const result = cur_buf[adjusted_index..new_end_index];
@@ -80,19 +79,4 @@ pub const ArenaAllocator = struct {
return result;
}
}
-
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- if (new_size <= old_mem.len and new_align <= new_size) {
- // We can't do anything with the memory, so tell the client to keep it.
- return error.OutOfMemory;
- } else {
- const result = try alloc(allocator, new_size, new_align);
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- return result;
- }
- }
-
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return old_mem[0..new_size];
- }
};
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0d15986a76..d3055c75ee 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -15,62 +15,75 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
pub fn init(parent_allocator: *Allocator, out_stream: OutStreamType) Self {
return Self{
.allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = resize,
},
.parent_allocator = parent_allocator,
.out_stream = out_stream,
};
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
- if (old_mem.len == 0) {
- self.out_stream.print("allocation of {} ", .{new_size}) catch {};
- } else {
- self.out_stream.print("resize from {} to {} ", .{ old_mem.len, new_size }) catch {};
- }
- const result = self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
+ self.out_stream.print("alloc : {}", .{len}) catch {};
+ const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
if (result) |buff| {
- self.out_stream.print("success!\n", .{}) catch {};
+ self.out_stream.print(" success!\n", .{}) catch {};
} else |err| {
- self.out_stream.print("failure!\n", .{}) catch {};
+ self.out_stream.print(" failure!\n", .{}) catch {};
}
return result;
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
- const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
- if (new_size == 0) {
- self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {};
+ if (new_len == 0) {
+ self.out_stream.print("free : {}\n", .{buf.len}) catch {};
+ } else if (new_len <= buf.len) {
+ self.out_stream.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
} else {
- self.out_stream.print("shrink from {} bytes to {} bytes success!\n", .{ old_mem.len, new_size }) catch {};
+ self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
+ }
+ if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
+ if (new_len > buf.len) {
+ self.out_stream.print(" success!\n", .{}) catch {};
+ }
+ return resized_len;
+ } else |e| {
+ std.debug.assert(new_len > buf.len);
+ self.out_stream.print(" failure!\n", .{}) catch {};
+ return e;
}
- return result;
}
};
}
pub fn loggingAllocator(
parent_allocator: *Allocator,
- out_stream: var,
+ out_stream: anytype,
) LoggingAllocator(@TypeOf(out_stream)) {
return LoggingAllocator(@TypeOf(out_stream)).init(parent_allocator, out_stream);
}
test "LoggingAllocator" {
- var buf: [255]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&buf);
+ var log_buf: [255]u8 = undefined;
+ var fbs = std.io.fixedBufferStream(&log_buf);
- const allocator = &loggingAllocator(std.testing.allocator, fbs.outStream()).allocator;
+ var allocator_buf: [10]u8 = undefined;
+ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
+ const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
- const ptr = try allocator.alloc(u8, 10);
- allocator.free(ptr);
+ var a = try allocator.alloc(u8, 10);
+ a.len = allocator.shrinkBytes(a, 5, 0);
+ std.debug.assert(a.len == 5);
+ std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
+ allocator.free(a);
std.testing.expectEqualSlices(u8,
- \\allocation of 10 success!
- \\free of 10 bytes success!
+ \\alloc : 10 success!
+ \\shrink: 10 to 5
+ \\expand: 5 to 20 failure!
+ \\free : 5
\\
, fbs.getWritten());
}
diff --git a/lib/std/http/headers.zig b/lib/std/http/headers.zig
index ba929a446c..86aff5f364 100644
--- a/lib/std/http/headers.zig
+++ b/lib/std/http/headers.zig
@@ -27,7 +27,6 @@ fn never_index_default(name: []const u8) bool {
}
const HeaderEntry = struct {
- allocator: *Allocator,
name: []const u8,
value: []u8,
never_index: bool,
@@ -36,23 +35,22 @@ const HeaderEntry = struct {
fn init(allocator: *Allocator, name: []const u8, value: []const u8, never_index: ?bool) !Self {
return Self{
- .allocator = allocator,
.name = name, // takes reference
- .value = try mem.dupe(allocator, u8, value),
+ .value = try allocator.dupe(u8, value),
.never_index = never_index orelse never_index_default(name),
};
}
- fn deinit(self: Self) void {
- self.allocator.free(self.value);
+ fn deinit(self: Self, allocator: *Allocator) void {
+ allocator.free(self.value);
}
- pub fn modify(self: *Self, value: []const u8, never_index: ?bool) !void {
+ pub fn modify(self: *Self, allocator: *Allocator, value: []const u8, never_index: ?bool) !void {
const old_len = self.value.len;
if (value.len > old_len) {
- self.value = try self.allocator.realloc(self.value, value.len);
+ self.value = try allocator.realloc(self.value, value.len);
} else if (value.len < old_len) {
- self.value = self.allocator.shrink(self.value, value.len);
+ self.value = allocator.shrink(self.value, value.len);
}
mem.copy(u8, self.value, value);
self.never_index = never_index orelse never_index_default(self.name);
@@ -85,22 +83,22 @@ const HeaderEntry = struct {
test "HeaderEntry" {
var e = try HeaderEntry.init(testing.allocator, "foo", "bar", null);
- defer e.deinit();
+ defer e.deinit(testing.allocator);
testing.expectEqualSlices(u8, "foo", e.name);
testing.expectEqualSlices(u8, "bar", e.value);
testing.expectEqual(false, e.never_index);
- try e.modify("longer value", null);
+ try e.modify(testing.allocator, "longer value", null);
testing.expectEqualSlices(u8, "longer value", e.value);
// shorter value
- try e.modify("x", null);
+ try e.modify(testing.allocator, "x", null);
testing.expectEqualSlices(u8, "x", e.value);
}
-const HeaderList = std.ArrayList(HeaderEntry);
-const HeaderIndexList = std.ArrayList(usize);
-const HeaderIndex = std.StringHashMap(HeaderIndexList);
+const HeaderList = std.ArrayListUnmanaged(HeaderEntry);
+const HeaderIndexList = std.ArrayListUnmanaged(usize);
+const HeaderIndex = std.StringHashMapUnmanaged(HeaderIndexList);
pub const Headers = struct {
// the owned header field name is stored in the index as part of the key
@@ -113,62 +111,62 @@ pub const Headers = struct {
pub fn init(allocator: *Allocator) Self {
return Self{
.allocator = allocator,
- .data = HeaderList.init(allocator),
- .index = HeaderIndex.init(allocator),
+ .data = HeaderList{},
+ .index = HeaderIndex{},
};
}
- pub fn deinit(self: Self) void {
+ pub fn deinit(self: *Self) void {
{
- var it = self.index.iterator();
- while (it.next()) |kv| {
- var dex = &kv.value;
- dex.deinit();
- self.allocator.free(kv.key);
+ for (self.index.items()) |*entry| {
+ const dex = &entry.value;
+ dex.deinit(self.allocator);
+ self.allocator.free(entry.key);
}
- self.index.deinit();
+ self.index.deinit(self.allocator);
}
{
- for (self.data.span()) |entry| {
- entry.deinit();
+ for (self.data.items) |entry| {
+ entry.deinit(self.allocator);
}
- self.data.deinit();
+ self.data.deinit(self.allocator);
}
+ self.* = undefined;
}
pub fn clone(self: Self, allocator: *Allocator) !Self {
var other = Headers.init(allocator);
errdefer other.deinit();
- try other.data.ensureCapacity(self.data.items.len);
- try other.index.initCapacity(self.index.entries.len);
- for (self.data.span()) |entry| {
+ try other.data.ensureCapacity(allocator, self.data.items.len);
+ try other.index.initCapacity(allocator, self.index.entries.len);
+ for (self.data.items) |entry| {
try other.append(entry.name, entry.value, entry.never_index);
}
return other;
}
pub fn toSlice(self: Self) []const HeaderEntry {
- return self.data.span();
+ return self.data.items;
}
pub fn append(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void {
const n = self.data.items.len + 1;
- try self.data.ensureCapacity(n);
+ try self.data.ensureCapacity(self.allocator, n);
var entry: HeaderEntry = undefined;
- if (self.index.get(name)) |kv| {
+ if (self.index.getEntry(name)) |kv| {
entry = try HeaderEntry.init(self.allocator, kv.key, value, never_index);
- errdefer entry.deinit();
- var dex = &kv.value;
- try dex.append(n - 1);
+ errdefer entry.deinit(self.allocator);
+ const dex = &kv.value;
+ try dex.append(self.allocator, n - 1);
} else {
- const name_dup = try mem.dupe(self.allocator, u8, name);
+ const name_dup = try self.allocator.dupe(u8, name);
errdefer self.allocator.free(name_dup);
entry = try HeaderEntry.init(self.allocator, name_dup, value, never_index);
- errdefer entry.deinit();
- var dex = HeaderIndexList.init(self.allocator);
- try dex.append(n - 1);
- errdefer dex.deinit();
- _ = try self.index.put(name_dup, dex);
+ errdefer entry.deinit(self.allocator);
+ var dex = HeaderIndexList{};
+ try dex.append(self.allocator, n - 1);
+ errdefer dex.deinit(self.allocator);
+ _ = try self.index.put(self.allocator, name_dup, dex);
}
self.data.appendAssumeCapacity(entry);
}
@@ -194,8 +192,8 @@ pub const Headers = struct {
/// Returns boolean indicating if something was deleted.
pub fn delete(self: *Self, name: []const u8) bool {
- if (self.index.remove(name)) |kv| {
- var dex = &kv.value;
+ if (self.index.remove(name)) |*kv| {
+ const dex = &kv.value;
// iterate backwards
var i = dex.items.len;
while (i > 0) {
@@ -203,11 +201,11 @@ pub const Headers = struct {
const data_index = dex.items[i];
const removed = self.data.orderedRemove(data_index);
assert(mem.eql(u8, removed.name, name));
- removed.deinit();
+ removed.deinit(self.allocator);
}
- dex.deinit();
+ dex.deinit(self.allocator);
self.allocator.free(kv.key);
- self.rebuild_index();
+ self.rebuildIndex();
return true;
} else {
return false;
@@ -216,45 +214,52 @@ pub const Headers = struct {
/// Removes the element at the specified index.
/// Moves items down to fill the empty space.
+ /// TODO this implementation can be replaced by adding
+ /// orderedRemove to the new hash table implementation as an
+ /// alternative to swapRemove.
pub fn orderedRemove(self: *Self, i: usize) void {
const removed = self.data.orderedRemove(i);
- const kv = self.index.get(removed.name).?;
- var dex = &kv.value;
+ const kv = self.index.getEntry(removed.name).?;
+ const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
- _ = self.index.remove(kv.key);
- dex.deinit();
- removed.deinit();
- self.allocator.free(kv.key);
+ dex.deinit(self.allocator);
+ removed.deinit(self.allocator);
+ const key = kv.key;
+ _ = self.index.remove(key); // invalidates `kv` and `dex`
+ self.allocator.free(key);
} else {
- dex.shrink(dex.items.len - 1);
- removed.deinit();
+ dex.shrink(self.allocator, dex.items.len - 1);
+ removed.deinit(self.allocator);
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
- self.rebuild_index();
+ self.rebuildIndex();
}
}
/// Removes the element at the specified index.
/// The empty slot is filled from the end of the list.
+ /// TODO this implementation can be replaced by simply using the
+ /// new hash table which does swap removal.
pub fn swapRemove(self: *Self, i: usize) void {
const removed = self.data.swapRemove(i);
- const kv = self.index.get(removed.name).?;
- var dex = &kv.value;
+ const kv = self.index.getEntry(removed.name).?;
+ const dex = &kv.value;
if (dex.items.len == 1) {
// was last item; delete the index
- _ = self.index.remove(kv.key);
- dex.deinit();
- removed.deinit();
- self.allocator.free(kv.key);
+ dex.deinit(self.allocator);
+ removed.deinit(self.allocator);
+ const key = kv.key;
+ _ = self.index.remove(key); // invalidates `kv` and `dex`
+ self.allocator.free(key);
} else {
- dex.shrink(dex.items.len - 1);
- removed.deinit();
+ dex.shrink(self.allocator, dex.items.len - 1);
+ removed.deinit(self.allocator);
}
// if it was the last item; no need to rebuild index
if (i != self.data.items.len) {
- self.rebuild_index();
+ self.rebuildIndex();
}
}
@@ -266,11 +271,7 @@ pub const Headers = struct {
/// Returns a list of indices containing headers with the given name.
/// The returned list should not be modified by the caller.
pub fn getIndices(self: Self, name: []const u8) ?HeaderIndexList {
- if (self.index.get(name)) |kv| {
- return kv.value;
- } else {
- return null;
- }
+ return self.index.get(name);
}
/// Returns a slice containing each header with the given name.
@@ -279,7 +280,7 @@ pub const Headers = struct {
const buf = try allocator.alloc(HeaderEntry, dex.items.len);
var n: usize = 0;
- for (dex.span()) |idx| {
+ for (dex.items) |idx| {
buf[n] = self.data.items[idx];
n += 1;
}
@@ -302,7 +303,7 @@ pub const Headers = struct {
// adapted from mem.join
const total_len = blk: {
var sum: usize = dex.items.len - 1; // space for separator(s)
- for (dex.span()) |idx|
+ for (dex.items) |idx|
sum += self.data.items[idx].value.len;
break :blk sum;
};
@@ -325,32 +326,27 @@ pub const Headers = struct {
return buf;
}
- fn rebuild_index(self: *Self) void {
- { // clear out the indexes
- var it = self.index.iterator();
- while (it.next()) |kv| {
- var dex = &kv.value;
- dex.items.len = 0; // keeps capacity available
- }
+ fn rebuildIndex(self: *Self) void {
+ // clear out the indexes
+ for (self.index.items()) |*entry| {
+ entry.value.shrinkRetainingCapacity(0);
}
- { // fill up indexes again; we know capacity is fine from before
- for (self.data.span()) |entry, i| {
- var dex = &self.index.get(entry.name).?.value;
- dex.appendAssumeCapacity(i);
- }
+ // fill up indexes again; we know capacity is fine from before
+ for (self.data.items) |entry, i| {
+ self.index.getEntry(entry.name).?.value.appendAssumeCapacity(i);
}
}
pub fn sort(self: *Self) void {
std.sort.sort(HeaderEntry, self.data.items, {}, HeaderEntry.compare);
- self.rebuild_index();
+ self.rebuildIndex();
}
pub fn format(
self: Self,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
for (self.toSlice()) |entry| {
try out_stream.writeAll(entry.name);
@@ -495,8 +491,8 @@ test "Headers.getIndices" {
try h.append("set-cookie", "y=2", null);
testing.expect(null == h.getIndices("not-present"));
- testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("foo").?.span());
- testing.expectEqualSlices(usize, &[_]usize{ 1, 2 }, h.getIndices("set-cookie").?.span());
+ testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("foo").?.items);
+ testing.expectEqualSlices(usize, &[_]usize{ 1, 2 }, h.getIndices("set-cookie").?.items);
}
test "Headers.get" {
diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig
index d5e8ce934f..fbdf7fbe78 100644
--- a/lib/std/io/bit_reader.zig
+++ b/lib/std/io/bit_reader.zig
@@ -170,7 +170,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
pub fn bitReader(
comptime endian: builtin.Endian,
- underlying_stream: var,
+ underlying_stream: anytype,
) BitReader(endian, @TypeOf(underlying_stream)) {
return BitReader(endian, @TypeOf(underlying_stream)).init(underlying_stream);
}
diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig
index bdf9156136..7c1d3e5dba 100644
--- a/lib/std/io/bit_writer.zig
+++ b/lib/std/io/bit_writer.zig
@@ -34,7 +34,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
/// Write the specified number of bits to the stream from the least significant bits of
/// the specified unsigned int value. Bits will only be written to the stream when there
/// are enough to fill a byte.
- pub fn writeBits(self: *Self, value: var, bits: usize) Error!void {
+ pub fn writeBits(self: *Self, value: anytype, bits: usize) Error!void {
if (bits == 0) return;
const U = @TypeOf(value);
@@ -145,7 +145,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
pub fn bitWriter(
comptime endian: builtin.Endian,
- underlying_stream: var,
+ underlying_stream: anytype,
) BitWriter(endian, @TypeOf(underlying_stream)) {
return BitWriter(endian, @TypeOf(underlying_stream)).init(underlying_stream);
}
diff --git a/lib/std/io/buffered_out_stream.zig b/lib/std/io/buffered_out_stream.zig
index 6b8ede5489..6f9efa9575 100644
--- a/lib/std/io/buffered_out_stream.zig
+++ b/lib/std/io/buffered_out_stream.zig
@@ -2,4 +2,4 @@
pub const BufferedOutStream = @import("./buffered_writer.zig").BufferedWriter;
/// Deprecated: use `std.io.buffered_writer.bufferedWriter`
-pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter
+pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter;
diff --git a/lib/std/io/buffered_reader.zig b/lib/std/io/buffered_reader.zig
index f33dc127d2..73d74b465f 100644
--- a/lib/std/io/buffered_reader.zig
+++ b/lib/std/io/buffered_reader.zig
@@ -48,7 +48,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty
};
}
-pub fn bufferedReader(underlying_stream: var) BufferedReader(4096, @TypeOf(underlying_stream)) {
+pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(underlying_stream)) {
return .{ .unbuffered_reader = underlying_stream };
}
diff --git a/lib/std/io/buffered_writer.zig b/lib/std/io/buffered_writer.zig
index 5cd102b510..a970f899d6 100644
--- a/lib/std/io/buffered_writer.zig
+++ b/lib/std/io/buffered_writer.zig
@@ -43,6 +43,6 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty
};
}
-pub fn bufferedWriter(underlying_stream: var) BufferedWriter(4096, @TypeOf(underlying_stream)) {
+pub fn bufferedWriter(underlying_stream: anytype) BufferedWriter(4096, @TypeOf(underlying_stream)) {
return .{ .unbuffered_writer = underlying_stream };
}
diff --git a/lib/std/io/counting_writer.zig b/lib/std/io/counting_writer.zig
index 90e4580eea..c0cd53c7ee 100644
--- a/lib/std/io/counting_writer.zig
+++ b/lib/std/io/counting_writer.zig
@@ -32,7 +32,7 @@ pub fn CountingWriter(comptime WriterType: type) type {
};
}
-pub fn countingWriter(child_stream: var) CountingWriter(@TypeOf(child_stream)) {
+pub fn countingWriter(child_stream: anytype) CountingWriter(@TypeOf(child_stream)) {
return .{ .bytes_written = 0, .child_stream = child_stream };
}
diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig
index ee5fe48ca5..32625f3b7a 100644
--- a/lib/std/io/fixed_buffer_stream.zig
+++ b/lib/std/io/fixed_buffer_stream.zig
@@ -127,7 +127,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
};
}
-pub fn fixedBufferStream(buffer: var) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) {
+pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) {
return .{ .buffer = mem.span(buffer), .pos = 0 };
}
diff --git a/lib/std/io/multi_writer.zig b/lib/std/io/multi_writer.zig
index 02ed75eaaa..e63940bff7 100644
--- a/lib/std/io/multi_writer.zig
+++ b/lib/std/io/multi_writer.zig
@@ -43,7 +43,7 @@ pub fn MultiWriter(comptime Writers: type) type {
};
}
-pub fn multiWriter(streams: var) MultiWriter(@TypeOf(streams)) {
+pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) {
return .{ .streams = streams };
}
diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig
index 2bf6b83bc5..08e940c6ec 100644
--- a/lib/std/io/peek_stream.zig
+++ b/lib/std/io/peek_stream.zig
@@ -80,7 +80,7 @@ pub fn PeekStream(
pub fn peekStream(
comptime lookahead: comptime_int,
- underlying_stream: var,
+ underlying_stream: anytype,
) PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)) {
return PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)).init(underlying_stream);
}
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 03744e4da4..4c682e8aba 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -40,8 +40,7 @@ pub fn Reader(
return index;
}
- /// Returns the number of bytes read. If the number read would be smaller than buf.len,
- /// error.EndOfStream is returned instead.
+ /// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
pub fn readNoEof(self: Self, buf: []u8) !void {
const amt_read = try self.readAll(buf);
if (amt_read < buf.len) return error.EndOfStream;
diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig
index 8c63b8b966..317dde6417 100644
--- a/lib/std/io/serialization.zig
+++ b/lib/std/io/serialization.zig
@@ -16,14 +16,16 @@ pub const Packing = enum {
};
/// Creates a deserializer that deserializes types from any stream.
-/// If `is_packed` is true, the data stream is treated as bit-packed,
-/// otherwise data is expected to be packed to the smallest byte.
-/// Types may implement a custom deserialization routine with a
-/// function named `deserialize` in the form of:
-/// pub fn deserialize(self: *Self, deserializer: var) !void
-/// which will be called when the deserializer is used to deserialize
-/// that type. It will pass a pointer to the type instance to deserialize
-/// into and a pointer to the deserializer struct.
+/// If `is_packed` is true, the data stream is treated as bit-packed,
+/// otherwise data is expected to be packed to the smallest byte.
+/// Types may implement a custom deserialization routine with a
+/// function named `deserialize` in the form of:
+/// ```
+/// pub fn deserialize(self: *Self, deserializer: anytype) !void
+/// ```
+/// which will be called when the deserializer is used to deserialize
+/// that type. It will pass a pointer to the type instance to deserialize
+/// into and a pointer to the deserializer struct.
pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, comptime ReaderType: type) type {
return struct {
in_stream: if (packing == .Bit) io.BitReader(endian, ReaderType) else ReaderType,
@@ -93,7 +95,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
}
/// Deserializes data into the type pointed to by `ptr`
- pub fn deserializeInto(self: *Self, ptr: var) !void {
+ pub fn deserializeInto(self: *Self, ptr: anytype) !void {
const T = @TypeOf(ptr);
comptime assert(trait.is(.Pointer)(T));
@@ -108,7 +110,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
const C = comptime meta.Child(T);
const child_type_id = @typeInfo(C);
- //custom deserializer: fn(self: *Self, deserializer: var) !void
+ //custom deserializer: fn(self: *Self, deserializer: anytype) !void
if (comptime trait.hasFn("deserialize")(C)) return C.deserialize(ptr, self);
if (comptime trait.isPacked(C) and packing != .Bit) {
@@ -190,24 +192,26 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
pub fn deserializer(
comptime endian: builtin.Endian,
comptime packing: Packing,
- in_stream: var,
+ in_stream: anytype,
) Deserializer(endian, packing, @TypeOf(in_stream)) {
return Deserializer(endian, packing, @TypeOf(in_stream)).init(in_stream);
}
/// Creates a serializer that serializes types to any stream.
-/// If `is_packed` is true, the data will be bit-packed into the stream.
-/// Note that the you must call `serializer.flush()` when you are done
-/// writing bit-packed data in order ensure any unwritten bits are committed.
-/// If `is_packed` is false, data is packed to the smallest byte. In the case
-/// of packed structs, the struct will written bit-packed and with the specified
-/// endianess, after which data will resume being written at the next byte boundary.
-/// Types may implement a custom serialization routine with a
-/// function named `serialize` in the form of:
-/// pub fn serialize(self: Self, serializer: var) !void
-/// which will be called when the serializer is used to serialize that type. It will
-/// pass a const pointer to the type instance to be serialized and a pointer
-/// to the serializer struct.
+/// If `is_packed` is true, the data will be bit-packed into the stream.
+/// Note that you must call `serializer.flush()` when you are done
+/// writing bit-packed data in order to ensure any unwritten bits are committed.
+/// If `is_packed` is false, data is packed to the smallest byte. In the case
+/// of packed structs, the struct will be written bit-packed and with the specified
+/// endianness, after which data will resume being written at the next byte boundary.
+/// Types may implement a custom serialization routine with a
+/// function named `serialize` in the form of:
+/// ```
+/// pub fn serialize(self: Self, serializer: anytype) !void
+/// ```
+/// which will be called when the serializer is used to serialize that type. It will
+/// pass a const pointer to the type instance to be serialized and a pointer
+/// to the serializer struct.
pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, comptime OutStreamType: type) type {
return struct {
out_stream: if (packing == .Bit) io.BitOutStream(endian, OutStreamType) else OutStreamType,
@@ -229,7 +233,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
if (packing == .Bit) return self.out_stream.flushBits();
}
- fn serializeInt(self: *Self, value: var) Error!void {
+ fn serializeInt(self: *Self, value: anytype) Error!void {
const T = @TypeOf(value);
comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T));
@@ -261,7 +265,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
}
/// Serializes the passed value into the stream
- pub fn serialize(self: *Self, value: var) Error!void {
+ pub fn serialize(self: *Self, value: anytype) Error!void {
const T = comptime @TypeOf(value);
if (comptime trait.isIndexable(T)) {
@@ -270,7 +274,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
return;
}
- //custom serializer: fn(self: Self, serializer: var) !void
+ //custom serializer: fn(self: Self, serializer: anytype) !void
if (comptime trait.hasFn("serialize")(T)) return T.serialize(value, self);
if (comptime trait.isPacked(T) and packing != .Bit) {
@@ -346,7 +350,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
pub fn serializer(
comptime endian: builtin.Endian,
comptime packing: Packing,
- out_stream: var,
+ out_stream: anytype,
) Serializer(endian, packing, @TypeOf(out_stream)) {
return Serializer(endian, packing, @TypeOf(out_stream)).init(out_stream);
}
@@ -462,7 +466,7 @@ test "Serializer/Deserializer Int: Inf/NaN" {
try testIntSerializerDeserializerInfNaN(.Little, .Bit);
}
-fn testAlternateSerializer(self: var, _serializer: var) !void {
+fn testAlternateSerializer(self: anytype, _serializer: anytype) !void {
try _serializer.serialize(self.f_f16);
}
@@ -503,7 +507,7 @@ fn testSerializerDeserializer(comptime endian: builtin.Endian, comptime packing:
f_f16: f16,
f_unused_u32: u32,
- pub fn deserialize(self: *@This(), _deserializer: var) !void {
+ pub fn deserialize(self: *@This(), _deserializer: anytype) !void {
try _deserializer.deserializeInto(&self.f_f16);
self.f_unused_u32 = 47;
}
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index 659ba2703e..a98e3b1acd 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -24,7 +24,7 @@ pub fn Writer(
}
}
- pub fn print(self: Self, comptime format: []const u8, args: var) Error!void {
+ pub fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
return std.fmt.format(self, format, args);
}
diff --git a/lib/std/json.zig b/lib/std/json.zig
index eeceeac8a7..f1b91fc829 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -239,7 +239,7 @@ pub const StreamingParser = struct {
NullLiteral3,
// Only call this function to generate array/object final state.
- pub fn fromInt(x: var) State {
+ pub fn fromInt(x: anytype) State {
debug.assert(x == 0 or x == 1);
const T = @TagType(State);
return @intToEnum(State, @intCast(T, x));
@@ -1236,7 +1236,7 @@ pub const Value = union(enum) {
pub fn jsonStringify(
value: @This(),
options: StringifyOptions,
- out_stream: var,
+ out_stream: anytype,
) @TypeOf(out_stream).Error!void {
switch (value) {
.Null => try stringify(null, options, out_stream),
@@ -1288,7 +1288,7 @@ pub const Value = union(enum) {
var held = std.debug.getStderrMutex().acquire();
defer held.release();
- const stderr = std.debug.getStderrStream();
+ const stderr = io.getStdErr().writer();
std.json.stringify(self, std.json.StringifyOptions{ .whitespace = null }, stderr) catch return;
}
};
@@ -1535,7 +1535,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
const allocator = options.allocator orelse return error.AllocatorRequired;
switch (ptrInfo.size) {
.One => {
- const r: T = allocator.create(ptrInfo.child);
+ const r: T = try allocator.create(ptrInfo.child);
r.* = try parseInternal(ptrInfo.child, token, tokens, options);
return r;
},
@@ -1567,7 +1567,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
if (ptrInfo.child != u8) return error.UnexpectedToken;
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
switch (stringToken.escapes) {
- .None => return mem.dupe(allocator, u8, source_slice),
+ .None => return allocator.dupe(u8, source_slice),
.Some => |some_escapes| {
const output = try allocator.alloc(u8, stringToken.decodedLength());
errdefer allocator.free(output);
@@ -1629,7 +1629,7 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
switch (ptrInfo.size) {
.One => {
parseFree(ptrInfo.child, value.*, options);
- allocator.destroy(v);
+ allocator.destroy(value);
},
.Slice => {
for (value) |v| {
@@ -2043,7 +2043,7 @@ pub const Parser = struct {
fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayloadType(Token, Token.String), input: []const u8, i: usize) !Value {
const slice = s.slice(input, i);
switch (s.escapes) {
- .None => return Value{ .String = if (p.copy_strings) try mem.dupe(allocator, u8, slice) else slice },
+ .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
.Some => |some_escapes| {
const output = try allocator.alloc(u8, s.decodedLength());
errdefer allocator.free(output);
@@ -2149,27 +2149,27 @@ test "json.parser.dynamic" {
var root = tree.root;
- var image = root.Object.get("Image").?.value;
+ var image = root.Object.get("Image").?;
- const width = image.Object.get("Width").?.value;
+ const width = image.Object.get("Width").?;
testing.expect(width.Integer == 800);
- const height = image.Object.get("Height").?.value;
+ const height = image.Object.get("Height").?;
testing.expect(height.Integer == 600);
- const title = image.Object.get("Title").?.value;
+ const title = image.Object.get("Title").?;
testing.expect(mem.eql(u8, title.String, "View from 15th Floor"));
- const animated = image.Object.get("Animated").?.value;
+ const animated = image.Object.get("Animated").?;
testing.expect(animated.Bool == false);
- const array_of_object = image.Object.get("ArrayOfObject").?.value;
+ const array_of_object = image.Object.get("ArrayOfObject").?;
testing.expect(array_of_object.Array.items.len == 1);
- const obj0 = array_of_object.Array.items[0].Object.get("n").?.value;
+ const obj0 = array_of_object.Array.items[0].Object.get("n").?;
testing.expect(mem.eql(u8, obj0.String, "m"));
- const double = image.Object.get("double").?.value;
+ const double = image.Object.get("double").?;
testing.expect(double.Float == 1.3412);
}
@@ -2217,12 +2217,12 @@ test "write json then parse it" {
var tree = try parser.parse(fixed_buffer_stream.getWritten());
defer tree.deinit();
- testing.expect(tree.root.Object.get("f").?.value.Bool == false);
- testing.expect(tree.root.Object.get("t").?.value.Bool == true);
- testing.expect(tree.root.Object.get("int").?.value.Integer == 1234);
- testing.expect(tree.root.Object.get("array").?.value.Array.items[0].Null == {});
- testing.expect(tree.root.Object.get("array").?.value.Array.items[1].Float == 12.34);
- testing.expect(mem.eql(u8, tree.root.Object.get("str").?.value.String, "hello"));
+ testing.expect(tree.root.Object.get("f").?.Bool == false);
+ testing.expect(tree.root.Object.get("t").?.Bool == true);
+ testing.expect(tree.root.Object.get("int").?.Integer == 1234);
+ testing.expect(tree.root.Object.get("array").?.Array.items[0].Null == {});
+ testing.expect(tree.root.Object.get("array").?.Array.items[1].Float == 12.34);
+ testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}
fn test_parse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
@@ -2245,7 +2245,7 @@ test "integer after float has proper type" {
\\ "ints": [1, 2, 3]
\\}
);
- std.testing.expect(json.Object.getValue("ints").?.Array.items[0] == .Integer);
+ std.testing.expect(json.Object.get("ints").?.Array.items[0] == .Integer);
}
test "escaped characters" {
@@ -2271,16 +2271,16 @@ test "escaped characters" {
const obj = (try test_parse(&arena_allocator.allocator, input)).Object;
- testing.expectEqualSlices(u8, obj.get("backslash").?.value.String, "\\");
- testing.expectEqualSlices(u8, obj.get("forwardslash").?.value.String, "/");
- testing.expectEqualSlices(u8, obj.get("newline").?.value.String, "\n");
- testing.expectEqualSlices(u8, obj.get("carriagereturn").?.value.String, "\r");
- testing.expectEqualSlices(u8, obj.get("tab").?.value.String, "\t");
- testing.expectEqualSlices(u8, obj.get("formfeed").?.value.String, "\x0C");
- testing.expectEqualSlices(u8, obj.get("backspace").?.value.String, "\x08");
- testing.expectEqualSlices(u8, obj.get("doublequote").?.value.String, "\"");
- testing.expectEqualSlices(u8, obj.get("unicode").?.value.String, "Ä…");
- testing.expectEqualSlices(u8, obj.get("surrogatepair").?.value.String, "😂");
+ testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
+ testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
+ testing.expectEqualSlices(u8, obj.get("newline").?.String, "\n");
+ testing.expectEqualSlices(u8, obj.get("carriagereturn").?.String, "\r");
+ testing.expectEqualSlices(u8, obj.get("tab").?.String, "\t");
+ testing.expectEqualSlices(u8, obj.get("formfeed").?.String, "\x0C");
+ testing.expectEqualSlices(u8, obj.get("backspace").?.String, "\x08");
+ testing.expectEqualSlices(u8, obj.get("doublequote").?.String, "\"");
+ testing.expectEqualSlices(u8, obj.get("unicode").?.String, "Ä…");
+ testing.expectEqualSlices(u8, obj.get("surrogatepair").?.String, "😂");
}
test "string copy option" {
@@ -2306,11 +2306,11 @@ test "string copy option" {
const obj_copy = tree_copy.root.Object;
for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
- testing.expectEqualSlices(u8, obj_nocopy.getValue(field_name).?.String, obj_copy.getValue(field_name).?.String);
+ testing.expectEqualSlices(u8, obj_nocopy.get(field_name).?.String, obj_copy.get(field_name).?.String);
}
- const nocopy_addr = &obj_nocopy.getValue("noescape").?.String[0];
- const copy_addr = &obj_copy.getValue("noescape").?.String[0];
+ const nocopy_addr = &obj_nocopy.get("noescape").?.String[0];
+ const copy_addr = &obj_copy.get("noescape").?.String[0];
var found_nocopy = false;
for (input) |_, index| {
@@ -2338,7 +2338,7 @@ pub const StringifyOptions = struct {
pub fn outputIndent(
whitespace: @This(),
- out_stream: var,
+ out_stream: anytype,
) @TypeOf(out_stream).Error!void {
var char: u8 = undefined;
var n_chars: usize = undefined;
@@ -2380,7 +2380,7 @@ pub const StringifyOptions = struct {
fn outputUnicodeEscape(
codepoint: u21,
- out_stream: var,
+ out_stream: anytype,
) !void {
if (codepoint <= 0xFFFF) {
// If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF),
@@ -2402,9 +2402,9 @@ fn outputUnicodeEscape(
}
pub fn stringify(
- value: var,
+ value: anytype,
options: StringifyOptions,
- out_stream: var,
+ out_stream: anytype,
) @TypeOf(out_stream).Error!void {
const T = @TypeOf(value);
switch (@typeInfo(T)) {
@@ -2576,15 +2576,15 @@ pub fn stringify(
},
.Array => return stringify(&value, options, out_stream),
.Vector => |info| {
- const array: [info.len]info.child = value;
- return stringify(&array, options, out_stream);
+ const array: [info.len]info.child = value;
+ return stringify(&array, options, out_stream);
},
else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
}
unreachable;
}
-fn teststringify(expected: []const u8, value: var, options: StringifyOptions) !void {
+fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions) !void {
const ValidationOutStream = struct {
const Self = @This();
pub const OutStream = std.io.OutStream(*Self, Error, write);
@@ -2758,7 +2758,7 @@ test "stringify struct with custom stringifier" {
pub fn jsonStringify(
value: Self,
options: StringifyOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
try out_stream.writeAll("[\"something special\",");
try stringify(42, options, out_stream);
@@ -2770,4 +2770,3 @@ test "stringify struct with custom stringifier" {
test "stringify vector" {
try teststringify("[1,1]", @splat(2, @as(u32, 1)), StringifyOptions{});
}
-
diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig
index dcfbf04bc1..778173cc24 100644
--- a/lib/std/json/write_stream.zig
+++ b/lib/std/json/write_stream.zig
@@ -152,7 +152,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
self: *Self,
/// An integer, float, or `std.math.BigInt`. Emitted as a bare number if it fits losslessly
/// in a IEEE 754 double float, otherwise emitted as a string to the full precision.
- value: var,
+ value: anytype,
) !void {
assert(self.state[self.state_index] == State.Value);
switch (@typeInfo(@TypeOf(value))) {
@@ -215,7 +215,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
self.state_index -= 1;
}
- fn stringify(self: *Self, value: var) !void {
+ fn stringify(self: *Self, value: anytype) !void {
try std.json.stringify(value, std.json.StringifyOptions{
.whitespace = self.whitespace,
}, self.stream);
@@ -224,7 +224,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
}
pub fn writeStream(
- out_stream: var,
+ out_stream: anytype,
comptime max_depth: usize,
) WriteStream(@TypeOf(out_stream), max_depth) {
return WriteStream(@TypeOf(out_stream), max_depth).init(out_stream);
diff --git a/lib/std/log.zig b/lib/std/log.zig
new file mode 100644
index 0000000000..3fb75b7e37
--- /dev/null
+++ b/lib/std/log.zig
@@ -0,0 +1,202 @@
+const std = @import("std.zig");
+const builtin = std.builtin;
+const root = @import("root");
+
+//! std.log is standardized interface for logging which allows for the logging
+//! of programs and libraries using this interface to be formatted and filtered
+//! by the implementer of the root.log function.
+//!
+//! The scope parameter should be used to give context to the logging. For
+//! example, a library called 'libfoo' might use .libfoo as its scope.
+//!
+//! An example root.log might look something like this:
+//!
+//! ```
+//! const std = @import("std");
+//!
+//! // Set the log level to warning
+//! pub const log_level: std.log.Level = .warn;
+//!
+//! // Define root.log to override the std implementation
+//! pub fn log(
+//! comptime level: std.log.Level,
+//! comptime scope: @TypeOf(.EnumLiteral),
+//! comptime format: []const u8,
+//! args: anytype,
+//! ) void {
+//! // Ignore all non-critical logging from sources other than
+//! // .my_project and .nice_library
+//! const scope_prefix = "(" ++ switch (scope) {
+//! .my_project, .nice_library => @tagName(scope),
+//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.crit))
+//! @tagName(scope)
+//! else
+//! return,
+//! } ++ "): ";
+//!
+//! const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
+//!
+//! // Print the message to stderr, silently ignoring any errors
+//! const held = std.debug.getStderrMutex().acquire();
+//! defer held.release();
+//! const stderr = std.debug.getStderrStream();
+//! nosuspend stderr.print(prefix ++ format, args) catch return;
+//! }
+//!
+//! pub fn main() void {
+//! // Won't be printed as log_level is .warn
+//! std.log.info(.my_project, "Starting up.\n", .{});
+//! std.log.err(.nice_library, "Something went very wrong, sorry.\n", .{});
+//! // Won't be printed as it gets filtered out by our log function
+//! std.log.err(.lib_that_logs_too_much, "Added 1 + 1\n", .{});
+//! }
+//! ```
+//! Which produces the following output:
+//! ```
+//! [err] (nice_library): Something went very wrong, sorry.
+//! ```
+
+pub const Level = enum {
+ /// Emergency: a condition that cannot be handled, usually followed by a
+ /// panic.
+ emerg,
+ /// Alert: a condition that should be corrected immediately (e.g. database
+ /// corruption).
+ alert,
+ /// Critical: A bug has been detected or something has gone wrong and it
+ /// will have an effect on the operation of the program.
+ crit,
+ /// Error: A bug has been detected or something has gone wrong but it is
+ /// recoverable.
+ err,
+ /// Warning: it is uncertain if something has gone wrong or not, but the
+ /// circumstances would be worth investigating.
+ warn,
+ /// Notice: non-error but significant conditions.
+ notice,
+ /// Informational: general messages about the state of the program.
+ info,
+ /// Debug: messages only useful for debugging.
+ debug,
+};
+
+/// The default log level is based on build mode. Note that in ReleaseSmall
+/// builds the default level is emerg but no messages will be stored/logged
+/// by the default logger to save space.
+pub const default_level: Level = switch (builtin.mode) {
+ .Debug => .debug,
+ .ReleaseSafe => .notice,
+ .ReleaseFast => .err,
+ .ReleaseSmall => .emerg,
+};
+
+/// The current log level. This is set to root.log_level if present, otherwise
+/// log.default_level.
+pub const level: Level = if (@hasDecl(root, "log_level"))
+ root.log_level
+else
+ default_level;
+
+fn log(
+ comptime message_level: Level,
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ if (@enumToInt(message_level) <= @enumToInt(level)) {
+ if (@hasDecl(root, "log")) {
+ root.log(message_level, scope, format, args);
+ } else if (builtin.mode != .ReleaseSmall) {
+ const held = std.debug.getStderrMutex().acquire();
+ defer held.release();
+ const stderr = std.io.getStdErr().writer();
+ nosuspend stderr.print(format, args) catch return;
+ }
+ }
+}
+
+/// Log an emergency message to stderr. This log level is intended to be used
+/// for conditions that cannot be handled and is usually followed by a panic.
+pub fn emerg(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ @setCold(true);
+ log(.emerg, scope, format, args);
+}
+
+/// Log an alert message to stderr. This log level is intended to be used for
+/// conditions that should be corrected immediately (e.g. database corruption).
+pub fn alert(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ @setCold(true);
+ log(.alert, scope, format, args);
+}
+
+/// Log a critical message to stderr. This log level is intended to be used
+/// when a bug has been detected or something has gone wrong and it will have
+/// an effect on the operation of the program.
+pub fn crit(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ @setCold(true);
+ log(.crit, scope, format, args);
+}
+
+/// Log an error message to stderr. This log level is intended to be used when
+/// a bug has been detected or something has gone wrong but it is recoverable.
+pub fn err(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ @setCold(true);
+ log(.err, scope, format, args);
+}
+
+/// Log a warning message to stderr. This log level is intended to be used if
+/// it is uncertain whether something has gone wrong or not, but the
+/// circumstances would be worth investigating.
+pub fn warn(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ log(.warn, scope, format, args);
+}
+
+/// Log a notice message to stderr. This log level is intended to be used for
+/// non-error but significant conditions.
+pub fn notice(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ log(.notice, scope, format, args);
+}
+
+/// Log an info message to stderr. This log level is intended to be used for
+/// general messages about the state of the program.
+pub fn info(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ log(.info, scope, format, args);
+}
+
+/// Log a debug message to stderr. This log level is intended to be used for
+/// messages which are only useful for debugging.
+pub fn debug(
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ log(.debug, scope, format, args);
+}
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 5cf6d40d8a..111a618cef 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -104,7 +104,7 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
}
// TODO: Hide the following in an internal module.
-pub fn forceEval(value: var) void {
+pub fn forceEval(value: anytype) void {
const T = @TypeOf(value);
switch (T) {
f16 => {
@@ -122,6 +122,11 @@ pub fn forceEval(value: var) void {
const p = @ptrCast(*volatile f64, &x);
p.* = x;
},
+ f128 => {
+ var x: f128 = undefined;
+ const p = @ptrCast(*volatile f128, &x);
+ p.* = x;
+ },
else => {
@compileError("forceEval not implemented for " ++ @typeName(T));
},
@@ -254,7 +259,7 @@ pub fn Min(comptime A: type, comptime B: type) type {
/// Returns the smaller number. When one of the parameter's type's full range fits in the other,
/// the return type is the smaller type.
-pub fn min(x: var, y: var) Min(@TypeOf(x), @TypeOf(y)) {
+pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) {
const Result = Min(@TypeOf(x), @TypeOf(y));
if (x < y) {
// TODO Zig should allow this as an implicit cast because x is immutable and in this
@@ -305,7 +310,7 @@ test "math.min" {
}
}
-pub fn max(x: var, y: var) @TypeOf(x, y) {
+pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
return if (x > y) x else y;
}
@@ -313,7 +318,7 @@ test "math.max" {
testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
}
-pub fn clamp(val: var, lower: var, upper: var) @TypeOf(val, lower, upper) {
+pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
assert(lower <= upper);
return max(lower, min(val, upper));
}
@@ -349,7 +354,7 @@ pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer;
}
-pub fn negate(x: var) !@TypeOf(x) {
+pub fn negate(x: anytype) !@TypeOf(x) {
return sub(@TypeOf(x), 0, x);
}
@@ -360,7 +365,7 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
-pub fn shl(comptime T: type, a: T, shift_amt: var) T {
+pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
@@ -386,7 +391,7 @@ test "math.shl" {
/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
-pub fn shr(comptime T: type, a: T, shift_amt: var) T {
+pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
@@ -414,7 +419,7 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
-pub fn rotr(comptime T: type, x: T, r: var) T {
+pub fn rotr(comptime T: type, x: T, r: anytype) T {
if (T.is_signed) {
@compileError("cannot rotate signed integer");
} else {
@@ -433,7 +438,7 @@ test "math.rotr" {
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
-pub fn rotl(comptime T: type, x: T, r: var) T {
+pub fn rotl(comptime T: type, x: T, r: anytype) T {
if (T.is_signed) {
@compileError("cannot rotate signed integer");
} else {
@@ -536,7 +541,7 @@ fn testOverflow() void {
testing.expect((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000);
}
-pub fn absInt(x: var) !@TypeOf(x) {
+pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
comptime assert(T.is_signed); // must pass a signed integer to absInt
@@ -684,7 +689,7 @@ fn testRem() void {
/// Returns the absolute value of the integer parameter.
/// Result is an unsigned integer.
-pub fn absCast(x: var) switch (@typeInfo(@TypeOf(x))) {
+pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) {
.ComptimeInt => comptime_int,
.Int => |intInfo| std.meta.Int(false, intInfo.bits),
else => @compileError("absCast only accepts integers"),
@@ -719,7 +724,7 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
-pub fn negateCast(x: var) !std.meta.Int(true, @TypeOf(x).bit_count) {
+pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
if (@TypeOf(x).is_signed) return negate(x);
const int = std.meta.Int(true, @TypeOf(x).bit_count);
@@ -742,7 +747,7 @@ test "math.negateCast" {
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
-pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
+pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int); // must pass an integer
comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
@@ -767,7 +772,7 @@ test "math.cast" {
pub const AlignCastError = error{UnalignedMemory};
/// Align cast a pointer but return an error if it's the wrong alignment
-pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
+pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
const addr = @ptrToInt(ptr);
if (addr % alignment != 0) {
return error.UnalignedMemory;
@@ -775,7 +780,7 @@ pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alig
return @alignCast(alignment, ptr);
}
-pub fn isPowerOfTwo(v: var) bool {
+pub fn isPowerOfTwo(v: anytype) bool {
assert(v != 0);
return (v & (v - 1)) == 0;
}
@@ -892,7 +897,7 @@ test "std.math.log2_int_ceil" {
testing.expect(log2_int_ceil(u32, 10) == 4);
}
-pub fn lossyCast(comptime T: type, value: var) T {
+pub fn lossyCast(comptime T: type, value: anytype) T {
switch (@typeInfo(@TypeOf(value))) {
.Int => return @intToFloat(T, value),
.Float => return @floatCast(T, value),
@@ -1026,7 +1031,7 @@ pub const Order = enum {
};
/// Given two numbers, this function returns the order they are with respect to each other.
-pub fn order(a: var, b: var) Order {
+pub fn order(a: anytype, b: anytype) Order {
if (a == b) {
return .eq;
} else if (a < b) {
@@ -1042,19 +1047,14 @@ pub fn order(a: var, b: var) Order {
pub const CompareOperator = enum {
/// Less than (`<`)
lt,
-
/// Less than or equal (`<=`)
lte,
-
/// Equal (`==`)
eq,
-
/// Greater than or equal (`>=`)
gte,
-
/// Greater than (`>`)
gt,
-
/// Not equal (`!=`)
neq,
};
@@ -1062,7 +1062,7 @@ pub const CompareOperator = enum {
/// This function does the same thing as comparison operators, however the
/// operator is a runtime-known enum value. Works on any operands that
/// support comparison operators.
-pub fn compare(a: var, op: CompareOperator, b: var) bool {
+pub fn compare(a: anytype, op: CompareOperator, b: anytype) bool {
return switch (op) {
.lt => a < b,
.lte => a <= b,
diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig
index aec0d4706a..cdd86601fd 100644
--- a/lib/std/math/acos.zig
+++ b/lib/std/math/acos.zig
@@ -12,7 +12,7 @@ const expect = std.testing.expect;
///
/// Special cases:
/// - acos(x) = nan if x < -1 or x > 1
-pub fn acos(x: var) @TypeOf(x) {
+pub fn acos(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => acos32(x),
diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig
index 0f99335058..9a594f9cc4 100644
--- a/lib/std/math/acosh.zig
+++ b/lib/std/math/acosh.zig
@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// Special cases:
/// - acosh(x) = snan if x < 1
/// - acosh(nan) = nan
-pub fn acosh(x: var) @TypeOf(x) {
+pub fn acosh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => acosh32(x),
diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig
index db57e2088f..4cff69fc1b 100644
--- a/lib/std/math/asin.zig
+++ b/lib/std/math/asin.zig
@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - asin(+-0) = +-0
/// - asin(x) = nan if x < -1 or x > 1
-pub fn asin(x: var) @TypeOf(x) {
+pub fn asin(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => asin32(x),
diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig
index ab1b650139..940b953d06 100644
--- a/lib/std/math/asinh.zig
+++ b/lib/std/math/asinh.zig
@@ -15,7 +15,7 @@ const maxInt = std.math.maxInt;
/// - asinh(+-0) = +-0
/// - asinh(+-inf) = +-inf
/// - asinh(nan) = nan
-pub fn asinh(x: var) @TypeOf(x) {
+pub fn asinh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => asinh32(x),
diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig
index eb9154b5fe..9342b6ed59 100644
--- a/lib/std/math/atan.zig
+++ b/lib/std/math/atan.zig
@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - atan(+-0) = +-0
/// - atan(+-inf) = +-pi/2
-pub fn atan(x: var) @TypeOf(x) {
+pub fn atan(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => atan32(x),
diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig
index e58a10fff5..de742bd4cd 100644
--- a/lib/std/math/atanh.zig
+++ b/lib/std/math/atanh.zig
@@ -15,7 +15,7 @@ const maxInt = std.math.maxInt;
/// - atanh(+-1) = +-inf with signal
/// - atanh(x) = nan if |x| > 1 with signal
/// - atanh(nan) = nan
-pub fn atanh(x: var) @TypeOf(x) {
+pub fn atanh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => atanh_32(x),
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 9379f881db..b6d7731f1a 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -12,7 +12,7 @@ const assert = std.debug.assert;
/// Returns the number of limbs needed to store `scalar`, which must be a
/// primitive integer value.
-pub fn calcLimbLen(scalar: var) usize {
+pub fn calcLimbLen(scalar: anytype) usize {
const T = @TypeOf(scalar);
switch (@typeInfo(T)) {
.Int => |info| {
@@ -110,7 +110,7 @@ pub const Mutable = struct {
/// `value` is a primitive integer type.
/// Asserts the value fits within the provided `limbs_buffer`.
/// Note: `calcLimbLen` can be used to figure out how big an array to allocate for `limbs_buffer`.
- pub fn init(limbs_buffer: []Limb, value: var) Mutable {
+ pub fn init(limbs_buffer: []Limb, value: anytype) Mutable {
limbs_buffer[0] = 0;
var self: Mutable = .{
.limbs = limbs_buffer,
@@ -169,7 +169,7 @@ pub const Mutable = struct {
/// Asserts the value fits within the limbs buffer.
/// Note: `calcLimbLen` can be used to figure out how big the limbs buffer
/// needs to be to store a specific value.
- pub fn set(self: *Mutable, value: var) void {
+ pub fn set(self: *Mutable, value: anytype) void {
const T = @TypeOf(value);
switch (@typeInfo(T)) {
@@ -281,7 +281,7 @@ pub const Mutable = struct {
///
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`.
- pub fn addScalar(r: *Mutable, a: Const, scalar: var) void {
+ pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void {
var limbs: [calcLimbLen(scalar)]Limb = undefined;
const operand = init(&limbs, scalar).toConst();
return add(r, a, operand);
@@ -1058,7 +1058,7 @@ pub const Const = struct {
self: Const,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
comptime var radix = 10;
comptime var uppercase = false;
@@ -1105,7 +1105,7 @@ pub const Const = struct {
assert(base <= 16);
if (self.eqZero()) {
- return mem.dupe(allocator, u8, "0");
+ return allocator.dupe(u8, "0");
}
const string = try allocator.alloc(u8, self.sizeInBaseUpperBound(base));
errdefer allocator.free(string);
@@ -1261,7 +1261,7 @@ pub const Const = struct {
}
/// Same as `order` but the right-hand operand is a primitive integer.
- pub fn orderAgainstScalar(lhs: Const, scalar: var) math.Order {
+ pub fn orderAgainstScalar(lhs: Const, scalar: anytype) math.Order {
var limbs: [calcLimbLen(scalar)]Limb = undefined;
const rhs = Mutable.init(&limbs, scalar);
return order(lhs, rhs.toConst());
@@ -1333,7 +1333,7 @@ pub const Managed = struct {
/// Creates a new `Managed` with value `value`.
///
/// This is identical to an `init`, followed by a `set`.
- pub fn initSet(allocator: *Allocator, value: var) !Managed {
+ pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
var s = try Managed.init(allocator);
try s.set(value);
return s;
@@ -1496,7 +1496,7 @@ pub const Managed = struct {
}
/// Sets an Managed to value. Value must be an primitive integer type.
- pub fn set(self: *Managed, value: var) Allocator.Error!void {
+ pub fn set(self: *Managed, value: anytype) Allocator.Error!void {
try self.ensureCapacity(calcLimbLen(value));
var m = self.toMutable();
m.set(value);
@@ -1549,7 +1549,7 @@ pub const Managed = struct {
self: Managed,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
return self.toConst().format(fmt, options, out_stream);
}
@@ -1607,7 +1607,7 @@ pub const Managed = struct {
/// scalar is a primitive integer type.
///
/// Returns an error if memory could not be allocated.
- pub fn addScalar(r: *Managed, a: Const, scalar: var) Allocator.Error!void {
+ pub fn addScalar(r: *Managed, a: Const, scalar: anytype) Allocator.Error!void {
try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1);
var m = r.toMutable();
m.addScalar(a, scalar);
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 3624a16139..6f62a462b8 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -43,7 +43,7 @@ pub const Rational = struct {
}
/// Set a Rational from a primitive integer type.
- pub fn setInt(self: *Rational, a: var) !void {
+ pub fn setInt(self: *Rational, a: anytype) !void {
try self.p.set(a);
try self.q.set(1);
}
@@ -280,7 +280,7 @@ pub const Rational = struct {
}
/// Set a rational from an integer ratio.
- pub fn setRatio(self: *Rational, p: var, q: var) !void {
+ pub fn setRatio(self: *Rational, p: anytype, q: anytype) !void {
try self.p.set(p);
try self.q.set(q);
diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig
index 2b219d5368..42163b96dc 100644
--- a/lib/std/math/cbrt.zig
+++ b/lib/std/math/cbrt.zig
@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// - cbrt(+-0) = +-0
/// - cbrt(+-inf) = +-inf
/// - cbrt(nan) = nan
-pub fn cbrt(x: var) @TypeOf(x) {
+pub fn cbrt(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => cbrt32(x),
diff --git a/lib/std/math/ceil.zig b/lib/std/math/ceil.zig
index b94e13a176..39de46f361 100644
--- a/lib/std/math/ceil.zig
+++ b/lib/std/math/ceil.zig
@@ -15,11 +15,12 @@ const expect = std.testing.expect;
/// - ceil(+-0) = +-0
/// - ceil(+-inf) = +-inf
/// - ceil(nan) = nan
-pub fn ceil(x: var) @TypeOf(x) {
+pub fn ceil(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => ceil32(x),
f64 => ceil64(x),
+ f128 => ceil128(x),
else => @compileError("ceil not implemented for " ++ @typeName(T)),
};
}
@@ -86,9 +87,37 @@ fn ceil64(x: f64) f64 {
}
}
+fn ceil128(x: f128) f128 {
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112 or x == 0) return x;
+
+ if (u >> 127 != 0) {
+ y = x - math.f128_toint + math.f128_toint - x;
+ } else {
+ y = x + math.f128_toint - math.f128_toint - x;
+ }
+
+ if (e <= 0x3FFF - 1) {
+ math.forceEval(y);
+ if (u >> 127 != 0) {
+ return -0.0;
+ } else {
+ return 1.0;
+ }
+ } else if (y < 0) {
+ return x + y + 1;
+ } else {
+ return x + y;
+ }
+}
+
test "math.ceil" {
expect(ceil(@as(f32, 0.0)) == ceil32(0.0));
expect(ceil(@as(f64, 0.0)) == ceil64(0.0));
+ expect(ceil(@as(f128, 0.0)) == ceil128(0.0));
}
test "math.ceil32" {
@@ -103,6 +132,12 @@ test "math.ceil64" {
expect(ceil64(0.2) == 1.0);
}
+test "math.ceil128" {
+ expect(ceil128(1.3) == 2.0);
+ expect(ceil128(-1.3) == -1.0);
+ expect(ceil128(0.2) == 1.0);
+}
+
test "math.ceil32.special" {
expect(ceil32(0.0) == 0.0);
expect(ceil32(-0.0) == -0.0);
@@ -118,3 +153,11 @@ test "math.ceil64.special" {
expect(math.isNegativeInf(ceil64(-math.inf(f64))));
expect(math.isNan(ceil64(math.nan(f64))));
}
+
+test "math.ceil128.special" {
+ expect(ceil128(0.0) == 0.0);
+ expect(ceil128(-0.0) == -0.0);
+ expect(math.isPositiveInf(ceil128(math.inf(f128))));
+ expect(math.isNegativeInf(ceil128(-math.inf(f128))));
+ expect(math.isNan(ceil128(math.nan(f128))));
+}
diff --git a/lib/std/math/complex/abs.zig b/lib/std/math/complex/abs.zig
index 75b967f3d2..db31aef42a 100644
--- a/lib/std/math/complex/abs.zig
+++ b/lib/std/math/complex/abs.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the absolute value (modulus) of z.
-pub fn abs(z: var) @TypeOf(z.re) {
+pub fn abs(z: anytype) @TypeOf(z.re) {
const T = @TypeOf(z.re);
return math.hypot(T, z.re, z.im);
}
diff --git a/lib/std/math/complex/acos.zig b/lib/std/math/complex/acos.zig
index 24a645375c..072fd77f08 100644
--- a/lib/std/math/complex/acos.zig
+++ b/lib/std/math/complex/acos.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the arc-cosine of z.
-pub fn acos(z: var) Complex(@TypeOf(z.re)) {
+pub fn acos(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const q = cmath.asin(z);
return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im);
diff --git a/lib/std/math/complex/acosh.zig b/lib/std/math/complex/acosh.zig
index 996334034a..59117a8b27 100644
--- a/lib/std/math/complex/acosh.zig
+++ b/lib/std/math/complex/acosh.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the hyperbolic arc-cosine of z.
-pub fn acosh(z: var) Complex(@TypeOf(z.re)) {
+pub fn acosh(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const q = cmath.acos(z);
return Complex(T).new(-q.im, q.re);
diff --git a/lib/std/math/complex/arg.zig b/lib/std/math/complex/arg.zig
index f690e92143..6cf959a081 100644
--- a/lib/std/math/complex/arg.zig
+++ b/lib/std/math/complex/arg.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the angular component (in radians) of z.
-pub fn arg(z: var) @TypeOf(z.re) {
+pub fn arg(z: anytype) @TypeOf(z.re) {
const T = @TypeOf(z.re);
return math.atan2(T, z.im, z.re);
}
diff --git a/lib/std/math/complex/asin.zig b/lib/std/math/complex/asin.zig
index 01fa33156a..9f7cd396aa 100644
--- a/lib/std/math/complex/asin.zig
+++ b/lib/std/math/complex/asin.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
// Returns the arc-sine of z.
-pub fn asin(z: var) Complex(@TypeOf(z.re)) {
+pub fn asin(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const x = z.re;
const y = z.im;
diff --git a/lib/std/math/complex/asinh.zig b/lib/std/math/complex/asinh.zig
index 47d8244adb..0c3c2bd115 100644
--- a/lib/std/math/complex/asinh.zig
+++ b/lib/std/math/complex/asinh.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the hyperbolic arc-sine of z.
-pub fn asinh(z: var) Complex(@TypeOf(z.re)) {
+pub fn asinh(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const q = Complex(T).new(-z.im, z.re);
const r = cmath.asin(q);
diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig
index 5ba6f7b0d2..98bde3e125 100644
--- a/lib/std/math/complex/atan.zig
+++ b/lib/std/math/complex/atan.zig
@@ -12,7 +12,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the arc-tangent of z.
-pub fn atan(z: var) @TypeOf(z) {
+pub fn atan(z: anytype) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
f32 => atan32(z),
diff --git a/lib/std/math/complex/atanh.zig b/lib/std/math/complex/atanh.zig
index 8b70306224..a07c2969e4 100644
--- a/lib/std/math/complex/atanh.zig
+++ b/lib/std/math/complex/atanh.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the hyperbolic arc-tangent of z.
-pub fn atanh(z: var) Complex(@TypeOf(z.re)) {
+pub fn atanh(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const q = Complex(T).new(-z.im, z.re);
const r = cmath.atan(q);
diff --git a/lib/std/math/complex/conj.zig b/lib/std/math/complex/conj.zig
index 1065d4bb73..42a34e7dfc 100644
--- a/lib/std/math/complex/conj.zig
+++ b/lib/std/math/complex/conj.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the complex conjugate of z.
-pub fn conj(z: var) Complex(@TypeOf(z.re)) {
+pub fn conj(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
return Complex(T).new(z.re, -z.im);
}
diff --git a/lib/std/math/complex/cos.zig b/lib/std/math/complex/cos.zig
index 1aefa73db5..9daf89c730 100644
--- a/lib/std/math/complex/cos.zig
+++ b/lib/std/math/complex/cos.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the cosine of z.
-pub fn cos(z: var) Complex(@TypeOf(z.re)) {
+pub fn cos(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const p = Complex(T).new(-z.im, z.re);
return cmath.cosh(p);
diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig
index a9ac893602..bd51629bd4 100644
--- a/lib/std/math/complex/cosh.zig
+++ b/lib/std/math/complex/cosh.zig
@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
/// Returns the hyperbolic arc-cosine of z.
-pub fn cosh(z: var) Complex(@TypeOf(z.re)) {
+pub fn cosh(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
return switch (T) {
f32 => cosh32(z),
diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig
index 9f9e3db807..6f6061a947 100644
--- a/lib/std/math/complex/exp.zig
+++ b/lib/std/math/complex/exp.zig
@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
/// Returns e raised to the power of z (e^z).
-pub fn exp(z: var) @TypeOf(z) {
+pub fn exp(z: anytype) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig
index 9eccd4bb98..c23b9b346e 100644
--- a/lib/std/math/complex/ldexp.zig
+++ b/lib/std/math/complex/ldexp.zig
@@ -11,7 +11,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns exp(z) scaled to avoid overflow.
-pub fn ldexp_cexp(z: var, expt: i32) @TypeOf(z) {
+pub fn ldexp_cexp(z: anytype, expt: i32) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
diff --git a/lib/std/math/complex/log.zig b/lib/std/math/complex/log.zig
index f1fad3175e..ec02c6c325 100644
--- a/lib/std/math/complex/log.zig
+++ b/lib/std/math/complex/log.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the natural logarithm of z.
-pub fn log(z: var) Complex(@TypeOf(z.re)) {
+pub fn log(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const r = cmath.abs(z);
const phi = cmath.arg(z);
diff --git a/lib/std/math/complex/proj.zig b/lib/std/math/complex/proj.zig
index 349f6b3abb..e208ae0370 100644
--- a/lib/std/math/complex/proj.zig
+++ b/lib/std/math/complex/proj.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the projection of z onto the riemann sphere.
-pub fn proj(z: var) Complex(@TypeOf(z.re)) {
+pub fn proj(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
if (math.isInf(z.re) or math.isInf(z.im)) {
diff --git a/lib/std/math/complex/sin.zig b/lib/std/math/complex/sin.zig
index 87dc57911b..1b10f8fca6 100644
--- a/lib/std/math/complex/sin.zig
+++ b/lib/std/math/complex/sin.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the sine of z.
-pub fn sin(z: var) Complex(@TypeOf(z.re)) {
+pub fn sin(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const p = Complex(T).new(-z.im, z.re);
const q = cmath.sinh(p);
diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig
index 7dd880c71c..32f2a730fb 100644
--- a/lib/std/math/complex/sinh.zig
+++ b/lib/std/math/complex/sinh.zig
@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
/// Returns the hyperbolic sine of z.
-pub fn sinh(z: var) @TypeOf(z) {
+pub fn sinh(z: anytype) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
f32 => sinh32(z),
diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig
index 57e73f6cd1..0edb02a7a9 100644
--- a/lib/std/math/complex/sqrt.zig
+++ b/lib/std/math/complex/sqrt.zig
@@ -12,7 +12,7 @@ const Complex = cmath.Complex;
/// Returns the square root of z. The real and imaginary parts of the result have the same sign
/// as the imaginary part of z.
-pub fn sqrt(z: var) @TypeOf(z) {
+pub fn sqrt(z: anytype) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
diff --git a/lib/std/math/complex/tan.zig b/lib/std/math/complex/tan.zig
index 70304803db..050898c573 100644
--- a/lib/std/math/complex/tan.zig
+++ b/lib/std/math/complex/tan.zig
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the tanget of z.
-pub fn tan(z: var) Complex(@TypeOf(z.re)) {
+pub fn tan(z: anytype) Complex(@TypeOf(z.re)) {
const T = @TypeOf(z.re);
const q = Complex(T).new(-z.im, z.re);
const r = cmath.tanh(q);
diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig
index afd2e6aee4..1d614cca58 100644
--- a/lib/std/math/complex/tanh.zig
+++ b/lib/std/math/complex/tanh.zig
@@ -12,7 +12,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;
/// Returns the hyperbolic tangent of z.
-pub fn tanh(z: var) @TypeOf(z) {
+pub fn tanh(z: anytype) @TypeOf(z) {
const T = @TypeOf(z.re);
return switch (T) {
f32 => tanh32(z),
diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig
index aa336769b1..df5c0a53be 100644
--- a/lib/std/math/cos.zig
+++ b/lib/std/math/cos.zig
@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - cos(+-inf) = nan
/// - cos(nan) = nan
-pub fn cos(x: var) @TypeOf(x) {
+pub fn cos(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => cos_(f32, x),
diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig
index 1cd8c5f27f..bab47dcdbd 100644
--- a/lib/std/math/cosh.zig
+++ b/lib/std/math/cosh.zig
@@ -17,7 +17,7 @@ const maxInt = std.math.maxInt;
/// - cosh(+-0) = 1
/// - cosh(+-inf) = +inf
/// - cosh(nan) = nan
-pub fn cosh(x: var) @TypeOf(x) {
+pub fn cosh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => cosh32(x),
diff --git a/lib/std/math/exp.zig b/lib/std/math/exp.zig
index da80b201c0..c84d929adf 100644
--- a/lib/std/math/exp.zig
+++ b/lib/std/math/exp.zig
@@ -14,7 +14,7 @@ const builtin = @import("builtin");
/// Special Cases:
/// - exp(+inf) = +inf
/// - exp(nan) = nan
-pub fn exp(x: var) @TypeOf(x) {
+pub fn exp(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => exp32(x),
diff --git a/lib/std/math/exp2.zig b/lib/std/math/exp2.zig
index 411f789187..da391189b2 100644
--- a/lib/std/math/exp2.zig
+++ b/lib/std/math/exp2.zig
@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - exp2(+inf) = +inf
/// - exp2(nan) = nan
-pub fn exp2(x: var) @TypeOf(x) {
+pub fn exp2(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => exp2_32(x),
diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig
index 91752e9f80..80cdefae20 100644
--- a/lib/std/math/expm1.zig
+++ b/lib/std/math/expm1.zig
@@ -18,7 +18,7 @@ const expect = std.testing.expect;
/// - expm1(+inf) = +inf
/// - expm1(-inf) = -1
/// - expm1(nan) = nan
-pub fn expm1(x: var) @TypeOf(x) {
+pub fn expm1(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => expm1_32(x),
diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig
index e70e365f26..f404570fb6 100644
--- a/lib/std/math/expo2.zig
+++ b/lib/std/math/expo2.zig
@@ -7,7 +7,7 @@
const math = @import("../math.zig");
/// Returns exp(x) / 2 for x >= log(maxFloat(T)).
-pub fn expo2(x: var) @TypeOf(x) {
+pub fn expo2(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => expo2f(x),
diff --git a/lib/std/math/fabs.zig b/lib/std/math/fabs.zig
index a659e35ca2..ca91f594fd 100644
--- a/lib/std/math/fabs.zig
+++ b/lib/std/math/fabs.zig
@@ -14,7 +14,7 @@ const maxInt = std.math.maxInt;
/// Special Cases:
/// - fabs(+-inf) = +inf
/// - fabs(nan) = nan
-pub fn fabs(x: var) @TypeOf(x) {
+pub fn fabs(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f16 => fabs16(x),
diff --git a/lib/std/math/floor.zig b/lib/std/math/floor.zig
index 1eda362e69..3a71cc7cdf 100644
--- a/lib/std/math/floor.zig
+++ b/lib/std/math/floor.zig
@@ -15,12 +15,13 @@ const math = std.math;
/// - floor(+-0) = +-0
/// - floor(+-inf) = +-inf
/// - floor(nan) = nan
-pub fn floor(x: var) @TypeOf(x) {
+pub fn floor(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f16 => floor16(x),
f32 => floor32(x),
f64 => floor64(x),
+ f128 => floor128(x),
else => @compileError("floor not implemented for " ++ @typeName(T)),
};
}
@@ -122,10 +123,38 @@ fn floor64(x: f64) f64 {
}
}
+fn floor128(x: f128) f128 {
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112 or x == 0) return x;
+
+ if (u >> 127 != 0) {
+ y = x - math.f128_toint + math.f128_toint - x;
+ } else {
+ y = x + math.f128_toint - math.f128_toint - x;
+ }
+
+ if (e <= 0x3FFF - 1) {
+ math.forceEval(y);
+ if (u >> 127 != 0) {
+ return -1.0;
+ } else {
+ return 0.0;
+ }
+ } else if (y > 0) {
+ return x + y - 1;
+ } else {
+ return x + y;
+ }
+}
+
test "math.floor" {
expect(floor(@as(f16, 1.3)) == floor16(1.3));
expect(floor(@as(f32, 1.3)) == floor32(1.3));
expect(floor(@as(f64, 1.3)) == floor64(1.3));
+ expect(floor(@as(f128, 1.3)) == floor128(1.3));
}
test "math.floor16" {
@@ -146,6 +175,12 @@ test "math.floor64" {
expect(floor64(0.2) == 0.0);
}
+test "math.floor128" {
+ expect(floor128(1.3) == 1.0);
+ expect(floor128(-1.3) == -2.0);
+ expect(floor128(0.2) == 0.0);
+}
+
test "math.floor16.special" {
expect(floor16(0.0) == 0.0);
expect(floor16(-0.0) == -0.0);
@@ -169,3 +204,11 @@ test "math.floor64.special" {
expect(math.isNegativeInf(floor64(-math.inf(f64))));
expect(math.isNan(floor64(math.nan(f64))));
}
+
+test "math.floor128.special" {
+ expect(floor128(0.0) == 0.0);
+ expect(floor128(-0.0) == -0.0);
+ expect(math.isPositiveInf(floor128(math.inf(f128))));
+ expect(math.isNegativeInf(floor128(-math.inf(f128))));
+ expect(math.isNan(floor128(math.nan(f128))));
+}
diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig
index cfdf9f838d..0e4558dc37 100644
--- a/lib/std/math/frexp.zig
+++ b/lib/std/math/frexp.zig
@@ -24,7 +24,7 @@ pub const frexp64_result = frexp_result(f64);
/// - frexp(+-0) = +-0, 0
/// - frexp(+-inf) = +-inf, 0
/// - frexp(nan) = nan, undefined
-pub fn frexp(x: var) frexp_result(@TypeOf(x)) {
+pub fn frexp(x: anytype) frexp_result(@TypeOf(x)) {
const T = @TypeOf(x);
return switch (T) {
f32 => frexp32(x),
diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig
index 22e3fbaa97..748cf9ea0d 100644
--- a/lib/std/math/ilogb.zig
+++ b/lib/std/math/ilogb.zig
@@ -16,7 +16,7 @@ const minInt = std.math.minInt;
/// - ilogb(+-inf) = maxInt(i32)
/// - ilogb(0) = maxInt(i32)
/// - ilogb(nan) = maxInt(i32)
-pub fn ilogb(x: var) i32 {
+pub fn ilogb(x: anytype) i32 {
const T = @TypeOf(x);
return switch (T) {
f32 => ilogb32(x),
diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig
index 26b3ce54a1..0681eae0b7 100644
--- a/lib/std/math/isfinite.zig
+++ b/lib/std/math/isfinite.zig
@@ -4,7 +4,7 @@ const expect = std.testing.expect;
const maxInt = std.math.maxInt;
/// Returns whether x is a finite value.
-pub fn isFinite(x: var) bool {
+pub fn isFinite(x: anytype) bool {
const T = @TypeOf(x);
switch (T) {
f16 => {
diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig
index 6eacab52ad..19357d89d1 100644
--- a/lib/std/math/isinf.zig
+++ b/lib/std/math/isinf.zig
@@ -4,7 +4,7 @@ const expect = std.testing.expect;
const maxInt = std.math.maxInt;
/// Returns whether x is an infinity, ignoring sign.
-pub fn isInf(x: var) bool {
+pub fn isInf(x: anytype) bool {
const T = @TypeOf(x);
switch (T) {
f16 => {
@@ -30,7 +30,7 @@ pub fn isInf(x: var) bool {
}
/// Returns whether x is an infinity with a positive sign.
-pub fn isPositiveInf(x: var) bool {
+pub fn isPositiveInf(x: anytype) bool {
const T = @TypeOf(x);
switch (T) {
f16 => {
@@ -52,7 +52,7 @@ pub fn isPositiveInf(x: var) bool {
}
/// Returns whether x is an infinity with a negative sign.
-pub fn isNegativeInf(x: var) bool {
+pub fn isNegativeInf(x: anytype) bool {
const T = @TypeOf(x);
switch (T) {
f16 => {
diff --git a/lib/std/math/isnan.zig b/lib/std/math/isnan.zig
index ac865f0d0c..797c115d1d 100644
--- a/lib/std/math/isnan.zig
+++ b/lib/std/math/isnan.zig
@@ -4,12 +4,12 @@ const expect = std.testing.expect;
const maxInt = std.math.maxInt;
/// Returns whether x is a nan.
-pub fn isNan(x: var) bool {
+pub fn isNan(x: anytype) bool {
return x != x;
}
/// Returns whether x is a signalling nan.
-pub fn isSignalNan(x: var) bool {
+pub fn isSignalNan(x: anytype) bool {
// Note: A signalling nan is identical to a standard nan right now but may have a different bit
// representation in the future when required.
return isNan(x);
diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig
index 917b4ebfcf..a3144f2784 100644
--- a/lib/std/math/isnormal.zig
+++ b/lib/std/math/isnormal.zig
@@ -4,7 +4,7 @@ const expect = std.testing.expect;
const maxInt = std.math.maxInt;
// Returns whether x has a normalized representation (i.e. integer part of mantissa is 1).
-pub fn isNormal(x: var) bool {
+pub fn isNormal(x: anytype) bool {
const T = @TypeOf(x);
switch (T) {
f16 => {
diff --git a/lib/std/math/ln.zig b/lib/std/math/ln.zig
index 555a786907..99e54c4cc7 100644
--- a/lib/std/math/ln.zig
+++ b/lib/std/math/ln.zig
@@ -15,7 +15,7 @@ const expect = std.testing.expect;
/// - ln(0) = -inf
/// - ln(x) = nan if x < 0
/// - ln(nan) = nan
-pub fn ln(x: var) @TypeOf(x) {
+pub fn ln(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.ComptimeFloat => {
diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig
index 7367af28c6..e55bd8c1e8 100644
--- a/lib/std/math/log10.zig
+++ b/lib/std/math/log10.zig
@@ -16,7 +16,7 @@ const maxInt = std.math.maxInt;
/// - log10(0) = -inf
/// - log10(x) = nan if x < 0
/// - log10(nan) = nan
-pub fn log10(x: var) @TypeOf(x) {
+pub fn log10(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.ComptimeFloat => {
diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig
index 5e92cfdea3..e24ba8d84d 100644
--- a/lib/std/math/log1p.zig
+++ b/lib/std/math/log1p.zig
@@ -17,7 +17,7 @@ const expect = std.testing.expect;
/// - log1p(-1) = -inf
/// - log1p(x) = nan if x < -1
/// - log1p(nan) = nan
-pub fn log1p(x: var) @TypeOf(x) {
+pub fn log1p(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => log1p_32(x),
diff --git a/lib/std/math/log2.zig b/lib/std/math/log2.zig
index 54f8bc2baa..95d06a2b60 100644
--- a/lib/std/math/log2.zig
+++ b/lib/std/math/log2.zig
@@ -16,7 +16,7 @@ const maxInt = std.math.maxInt;
/// - log2(0) = -inf
/// - log2(x) = nan if x < 0
/// - log2(nan) = nan
-pub fn log2(x: var) @TypeOf(x) {
+pub fn log2(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.ComptimeFloat => {
diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig
index 6fd89e3dda..5ab5318a79 100644
--- a/lib/std/math/modf.zig
+++ b/lib/std/math/modf.zig
@@ -24,7 +24,7 @@ pub const modf64_result = modf_result(f64);
/// Special Cases:
/// - modf(+-inf) = +-inf, nan
/// - modf(nan) = nan, nan
-pub fn modf(x: var) modf_result(@TypeOf(x)) {
+pub fn modf(x: anytype) modf_result(@TypeOf(x)) {
const T = @TypeOf(x);
return switch (T) {
f32 => modf32(x),
diff --git a/lib/std/math/round.zig b/lib/std/math/round.zig
index dceb3ed770..854adee4ba 100644
--- a/lib/std/math/round.zig
+++ b/lib/std/math/round.zig
@@ -15,11 +15,12 @@ const math = std.math;
/// - round(+-0) = +-0
/// - round(+-inf) = +-inf
/// - round(nan) = nan
-pub fn round(x: var) @TypeOf(x) {
+pub fn round(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => round32(x),
f64 => round64(x),
+ f128 => round128(x),
else => @compileError("round not implemented for " ++ @typeName(T)),
};
}
@@ -90,9 +91,43 @@ fn round64(x_: f64) f64 {
}
}
+fn round128(x_: f128) f128 {
+ var x = x_;
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112) {
+ return x;
+ }
+ if (u >> 127 != 0) {
+ x = -x;
+ }
+ if (e < 0x3FFF - 1) {
+ math.forceEval(x + math.f64_toint);
+ return 0 * @bitCast(f128, u);
+ }
+
+ y = x + math.f128_toint - math.f128_toint - x;
+ if (y > 0.5) {
+ y = y + x - 1;
+ } else if (y <= -0.5) {
+ y = y + x + 1;
+ } else {
+ y = y + x;
+ }
+
+ if (u >> 127 != 0) {
+ return -y;
+ } else {
+ return y;
+ }
+}
+
test "math.round" {
expect(round(@as(f32, 1.3)) == round32(1.3));
expect(round(@as(f64, 1.3)) == round64(1.3));
+ expect(round(@as(f128, 1.3)) == round128(1.3));
}
test "math.round32" {
@@ -109,6 +144,13 @@ test "math.round64" {
expect(round64(1.8) == 2.0);
}
+test "math.round128" {
+ expect(round128(1.3) == 1.0);
+ expect(round128(-1.3) == -1.0);
+ expect(round128(0.2) == 0.0);
+ expect(round128(1.8) == 2.0);
+}
+
test "math.round32.special" {
expect(round32(0.0) == 0.0);
expect(round32(-0.0) == -0.0);
@@ -124,3 +166,11 @@ test "math.round64.special" {
expect(math.isNegativeInf(round64(-math.inf(f64))));
expect(math.isNan(round64(math.nan(f64))));
}
+
+test "math.round128.special" {
+ expect(round128(0.0) == 0.0);
+ expect(round128(-0.0) == -0.0);
+ expect(math.isPositiveInf(round128(math.inf(f128))));
+ expect(math.isNegativeInf(round128(-math.inf(f128))));
+ expect(math.isNan(round128(math.nan(f128))));
+}
diff --git a/lib/std/math/scalbn.zig b/lib/std/math/scalbn.zig
index bab109f334..71a8110ce7 100644
--- a/lib/std/math/scalbn.zig
+++ b/lib/std/math/scalbn.zig
@@ -9,7 +9,7 @@ const math = std.math;
const expect = std.testing.expect;
/// Returns x * 2^n.
-pub fn scalbn(x: var, n: i32) @TypeOf(x) {
+pub fn scalbn(x: anytype, n: i32) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => scalbn32(x, n),
diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig
index 9cb62b5042..49397f7bd4 100644
--- a/lib/std/math/signbit.zig
+++ b/lib/std/math/signbit.zig
@@ -3,7 +3,7 @@ const math = std.math;
const expect = std.testing.expect;
/// Returns whether x is negative or negative 0.
-pub fn signbit(x: var) bool {
+pub fn signbit(x: anytype) bool {
const T = @TypeOf(x);
return switch (T) {
f16 => signbit16(x),
diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig
index e88f5eeeaf..df3b294ca6 100644
--- a/lib/std/math/sin.zig
+++ b/lib/std/math/sin.zig
@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// - sin(+-0) = +-0
/// - sin(+-inf) = nan
/// - sin(nan) = nan
-pub fn sin(x: var) @TypeOf(x) {
+pub fn sin(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => sin_(T, x),
diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig
index 0e2cb5a3d5..26e0e05f38 100644
--- a/lib/std/math/sinh.zig
+++ b/lib/std/math/sinh.zig
@@ -17,7 +17,7 @@ const maxInt = std.math.maxInt;
/// - sinh(+-0) = +-0
/// - sinh(+-inf) = +-inf
/// - sinh(nan) = nan
-pub fn sinh(x: var) @TypeOf(x) {
+pub fn sinh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => sinh32(x),
diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig
index 097b0152f7..2f0d251432 100644
--- a/lib/std/math/sqrt.zig
+++ b/lib/std/math/sqrt.zig
@@ -13,7 +13,7 @@ const maxInt = std.math.maxInt;
/// - sqrt(x) = nan if x < 0
/// - sqrt(nan) = nan
/// TODO Decide if all this logic should be implemented directly in the @sqrt bultin function.
-pub fn sqrt(x: var) Sqrt(@TypeOf(x)) {
+pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.Float, .ComptimeFloat => return @sqrt(x),
diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig
index 86f473f448..2cd5a407df 100644
--- a/lib/std/math/tan.zig
+++ b/lib/std/math/tan.zig
@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// - tan(+-0) = +-0
/// - tan(+-inf) = nan
/// - tan(nan) = nan
-pub fn tan(x: var) @TypeOf(x) {
+pub fn tan(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => tan_(f32, x),
diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig
index 1cad399729..7697db5271 100644
--- a/lib/std/math/tanh.zig
+++ b/lib/std/math/tanh.zig
@@ -17,7 +17,7 @@ const maxInt = std.math.maxInt;
/// - sinh(+-0) = +-0
/// - sinh(+-inf) = +-1
/// - sinh(nan) = nan
-pub fn tanh(x: var) @TypeOf(x) {
+pub fn tanh(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => tanh32(x),
diff --git a/lib/std/math/trunc.zig b/lib/std/math/trunc.zig
index b70f0c6be3..df24b77111 100644
--- a/lib/std/math/trunc.zig
+++ b/lib/std/math/trunc.zig
@@ -15,11 +15,12 @@ const maxInt = std.math.maxInt;
/// - trunc(+-0) = +-0
/// - trunc(+-inf) = +-inf
/// - trunc(nan) = nan
-pub fn trunc(x: var) @TypeOf(x) {
+pub fn trunc(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => trunc32(x),
f64 => trunc64(x),
+ f128 => trunc128(x),
else => @compileError("trunc not implemented for " ++ @typeName(T)),
};
}
@@ -66,9 +67,31 @@ fn trunc64(x: f64) f64 {
}
}
+fn trunc128(x: f128) f128 {
+ const u = @bitCast(u128, x);
+ var e = @intCast(i32, ((u >> 112) & 0x7FFF)) - 0x3FFF + 16;
+ var m: u128 = undefined;
+
+ if (e >= 112 + 16) {
+ return x;
+ }
+ if (e < 16) {
+ e = 1;
+ }
+
+ m = @as(u128, maxInt(u128)) >> @intCast(u7, e);
+ if (u & m == 0) {
+ return x;
+ } else {
+ math.forceEval(x + 0x1p120);
+ return @bitCast(f128, u & ~m);
+ }
+}
+
test "math.trunc" {
expect(trunc(@as(f32, 1.3)) == trunc32(1.3));
expect(trunc(@as(f64, 1.3)) == trunc64(1.3));
+ expect(trunc(@as(f128, 1.3)) == trunc128(1.3));
}
test "math.trunc32" {
@@ -83,6 +106,12 @@ test "math.trunc64" {
expect(trunc64(0.2) == 0.0);
}
+test "math.trunc128" {
+ expect(trunc128(1.3) == 1.0);
+ expect(trunc128(-1.3) == -1.0);
+ expect(trunc128(0.2) == 0.0);
+}
+
test "math.trunc32.special" {
expect(trunc32(0.0) == 0.0); // 0x3F800000
expect(trunc32(-0.0) == -0.0);
@@ -98,3 +127,11 @@ test "math.trunc64.special" {
expect(math.isNegativeInf(trunc64(-math.inf(f64))));
expect(math.isNan(trunc64(math.nan(f64))));
}
+
+test "math.trunc128.special" {
+ expect(trunc128(0.0) == 0.0);
+ expect(trunc128(-0.0) == -0.0);
+ expect(math.isPositiveInf(trunc128(math.inf(f128))));
+ expect(math.isNegativeInf(trunc128(-math.inf(f128))));
+ expect(math.isNan(trunc128(math.nan(f128))));
+}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index b942fd3bf4..ac7bde47c1 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -8,6 +8,7 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;
+// https://github.com/ziglang/zig/issues/2564
pub const page_size = switch (builtin.arch) {
.wasm32, .wasm64 => 64 * 1024,
else => 4 * 1024,
@@ -16,6 +17,52 @@ pub const page_size = switch (builtin.arch) {
pub const Allocator = struct {
pub const Error = error{OutOfMemory};
+ /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
+ ///
+ /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
+ /// otherwise, the length must be aligned to `len_align`.
+ ///
+ /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
+
+ /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
+ /// length returned by `allocFn` or `resizeFn`.
+ ///
+ /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
+ /// longer be passed to `resizeFn`.
+ ///
+ /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+ /// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
+ /// unmodified and error.OutOfMemory MUST be returned.
+ ///
+ /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
+ /// otherwise, the length must be aligned to `len_align`.
+ ///
+ /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+ resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,
+
+ pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
+ return self.allocFn(self, new_len, alignment, len_align);
+ }
+
+ pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
+ return self.resizeFn(self, buf, new_len, len_align);
+ }
+
+ /// Set to resizeFn if in-place resize is not supported.
+ pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
+ if (new_len > buf.len)
+ return error.OutOfMemory;
+ return new_len;
+ }
+
+ /// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+ /// error.OutOfMemory should be impossible.
+ pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
+ assert(new_len <= buf.len);
+ return self.callResizeFn(buf, new_len, len_align) catch unreachable;
+ }
+
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
@@ -24,7 +71,7 @@ pub const Allocator = struct {
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size. Clients will call
- /// `shrinkFn` when they require the allocator to track a new alignment/size,
+ /// `callResizeFn` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
@@ -37,16 +84,15 @@ pub const Allocator = struct {
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
- reallocFn: fn (
+ fn reallocBytes(
self: *Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
+ /// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
+ /// Guaranteed to be the same as what was passed to `allocFn`.
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
old_alignment: u29,
@@ -57,23 +103,49 @@ pub const Allocator = struct {
/// Guaranteed to be a power of 2.
/// Returned slice's pointer must have this alignment.
new_alignment: u29,
- ) Error![]u8,
+ /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
+ /// non-zero means the length of the returned slice must be aligned by `len_align`
+ /// `new_len` must be aligned by `len_align`
+ len_align: u29,
+ ) Error![]u8 {
+ if (old_mem.len == 0) {
+ const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
+ @memset(new_mem.ptr, undefined, new_byte_count);
+ return new_mem;
+ }
- /// This function deallocates memory. It must succeed.
- shrinkFn: fn (
- self: *Allocator,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
- old_mem: []u8,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
- old_alignment: u29,
- /// Guaranteed to be less than or equal to `old_mem.len`.
- new_byte_count: usize,
- /// If `new_byte_count == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be less than or equal to `old_alignment`.
- new_alignment: u29,
- ) []u8,
+ if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
+ if (new_byte_count <= old_mem.len) {
+ const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
+ return old_mem.ptr[0..shrunk_len];
+ }
+ if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
+ assert(resized_len >= new_byte_count);
+ @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
+ return old_mem.ptr[0..resized_len];
+ } else |_| {}
+ }
+ if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
+ return error.OutOfMemory;
+ }
+ return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
+ }
+
+ /// Move the given memory to a new location in the given allocator to accommodate a new
+ /// size and alignment.
+ fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
+ assert(old_mem.len > 0);
+ assert(new_len > 0);
+ const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
+ @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
+ // DISABLED TO AVOID BUGS IN TRANSLATE C
+ // use './zig build test-translate-c' to reproduce, some of the symbols in the
+ // generated C code will be a sequence of 0xaa (the undefined value), meaning
+ // it is printing data that has been freed
+ //@memset(old_mem.ptr, undefined, old_mem.len);
+ _ = self.shrinkBytes(old_mem, 0, 0);
+ return new_mem;
+ }
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
@@ -85,12 +157,11 @@ pub const Allocator = struct {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
- pub fn destroy(self: *Allocator, ptr: var) void {
+ pub fn destroy(self: *Allocator, ptr: anytype) void {
const T = @TypeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
- assert(shrink_result.len == 0);
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -144,6 +215,7 @@ pub const Allocator = struct {
return self.allocWithOptions(Elem, n, null, sentinel);
}
+ /// Deprecated: use `allocAdvanced`
pub fn alignedAlloc(
self: *Allocator,
comptime T: type,
@@ -151,8 +223,20 @@ pub const Allocator = struct {
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
+ return self.allocAdvanced(T, alignment, n, .exact);
+ }
+
+ const Exact = enum { exact, at_least };
+ pub fn allocAdvanced(
+ self: *Allocator,
+ comptime T: type,
+ /// null means naturally aligned
+ comptime alignment: ?u29,
+ n: usize,
+ exact: Exact,
+ ) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
- if (a == @alignOf(T)) return alignedAlloc(self, T, null, n);
+ if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);
@@ -161,15 +245,19 @@ pub const Allocator = struct {
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
- const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a);
- assert(byte_slice.len == byte_count);
+ // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
+ // access certain type information about T without creating a circular dependency in async
+ // functions that heap-allocate their own frame with @Frame(func).
+ const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
+ const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
+ switch (exact) {
+ .exact => assert(byte_slice.len == byte_count),
+ .at_least => assert(byte_slice.len >= byte_count),
+ }
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
- // TODO This is a workaround for zig not being able to successfully do
- // @bytesToSlice(T, @alignCast(a, byte_slice)) without resolving alignment of T,
- // which causes a circular dependency in async functions which try to heap-allocate
- // their own frame with @Frame(func).
- return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
+ // This if block is a workaround (see comment above)
+ return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
@@ -185,27 +273,46 @@ pub const Allocator = struct {
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
- pub fn realloc(self: *Allocator, old_mem: var, new_n: usize) t: {
+ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ break :t Error![]align(Slice.alignment) Slice.child;
+ } {
+ const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
+ }
+
+ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.alignedRealloc(old_mem, old_alignment, new_n);
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
+ }
+
+ // Deprecated: use `reallocAdvanced`
+ pub fn alignedRealloc(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+ ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
}
/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
- pub fn alignedRealloc(
+ pub fn reallocAdvanced(
self: *Allocator,
- old_mem: var,
+ old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
+ exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
- return self.alignedAlloc(T, new_alignment, new_n);
+ return self.allocAdvanced(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);
@@ -215,12 +322,8 @@ pub const Allocator = struct {
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
- const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
- assert(byte_slice.len == byte_count);
- if (new_n > old_mem.len) {
- @memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
- }
- return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
+ const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@@ -228,7 +331,7 @@ pub const Allocator = struct {
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
- pub fn shrink(self: *Allocator, old_mem: var, new_n: usize) t: {
+ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
@@ -241,19 +344,16 @@ pub const Allocator = struct {
/// allocation.
pub fn alignedShrink(
self: *Allocator,
- old_mem: var,
+ old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
- if (new_n == 0) {
- self.free(old_mem);
- return old_mem[0..0];
- }
-
- assert(new_n <= old_mem.len);
+ if (new_n == old_mem.len)
+ return old_mem;
+ assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
// Here we skip the overflow checking on the multiplication because
@@ -262,22 +362,20 @@ pub const Allocator = struct {
const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
- const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
- assert(byte_slice.len == byte_count);
- return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
+ _ = self.shrinkBytes(old_byte_slice, byte_count, 0);
+ return old_mem[0..new_n];
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
- pub fn free(self: *Allocator, memory: var) void {
+ pub fn free(self: *Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@memset(non_const_ptr, undefined, bytes_len);
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1);
- assert(shrink_result.len == 0);
+ _ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -296,16 +394,96 @@ pub const Allocator = struct {
}
};
+/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
+/// or the allocator.
+pub fn ValidationAllocator(comptime T: type) type {
+ return struct {
+ const Self = @This();
+ allocator: Allocator,
+ underlying_allocator: T,
+ pub fn init(allocator: T) @This() {
+ return .{
+ .allocator = .{
+ .allocFn = alloc,
+ .resizeFn = resize,
+ },
+ .underlying_allocator = allocator,
+ };
+ }
+ fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
+ if (T == *Allocator) return self.underlying_allocator;
+ if (*T == *Allocator) return &self.underlying_allocator;
+ return &self.underlying_allocator.allocator;
+ }
+ pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+ assert(n > 0);
+ assert(mem.isValidAlign(ptr_align));
+ if (len_align != 0) {
+ assert(mem.isAlignedAnyAlign(n, len_align));
+ assert(n >= len_align);
+ }
+
+ const self = @fieldParentPtr(@This(), "allocator", allocator);
+ const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
+ assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
+ if (len_align == 0) {
+ assert(result.len == n);
+ } else {
+ assert(result.len >= n);
+ assert(mem.isAlignedAnyAlign(result.len, len_align));
+ }
+ return result;
+ }
+ pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+ assert(buf.len > 0);
+ if (len_align != 0) {
+ assert(mem.isAlignedAnyAlign(new_len, len_align));
+ assert(new_len >= len_align);
+ }
+ const self = @fieldParentPtr(@This(), "allocator", allocator);
+ const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
+ if (len_align == 0) {
+ assert(result == new_len);
+ } else {
+ assert(result >= new_len);
+ assert(mem.isAlignedAnyAlign(result, len_align));
+ }
+ return result;
+ }
+ pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
+ pub fn reset(self: *Self) void {
+ self.underlying_allocator.reset();
+ }
+ };
+ };
+}
+
+pub fn validationWrap(allocator: anytype) ValidationAllocator(@TypeOf(allocator)) {
+ return ValidationAllocator(@TypeOf(allocator)).init(allocator);
+}
+
+/// An allocator helper function. Adjusts an allocation length satisfy `len_align`.
+/// `full_len` should be the full capacity of the allocation which may be greater
+/// than the `len` that was requsted. This function should only be used by allocators
+/// that are unaffected by `len_align`.
+pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
+ assert(alloc_len > 0);
+ assert(alloc_len >= len_align);
+ assert(full_len >= alloc_len);
+ if (len_align == 0)
+ return alloc_len;
+ const adjusted = alignBackwardAnyAlign(full_len, len_align);
+ assert(adjusted >= alloc_len);
+ return adjusted;
+}
+
var failAllocator = Allocator{
- .reallocFn = failAllocatorRealloc,
- .shrinkFn = failAllocatorShrink,
+ .allocFn = failAllocatorAlloc,
+ .resizeFn = Allocator.noResize,
};
-fn failAllocatorRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
return error.OutOfMemory;
}
-fn failAllocatorShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- @panic("failAllocatorShrink should never be called because it cannot allocate");
-}
test "mem.Allocator basics" {
testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1));
@@ -341,6 +519,7 @@ pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void {
}
}
+/// Sets all elements of `dest` to `value`.
pub fn set(comptime T: type, dest: []T, value: T) void {
for (dest) |*d|
d.* = value;
@@ -373,7 +552,7 @@ pub fn zeroes(comptime T: type) T {
if (@sizeOf(T) == 0) return T{};
if (comptime meta.containerLayout(T) == .Extern) {
var item: T = undefined;
- @memset(@ptrCast([*]u8, &item), 0, @sizeOf(T));
+ set(u8, asBytes(&item), 0);
return item;
} else {
var structure: T = undefined;
@@ -498,6 +677,8 @@ test "mem.zeroes" {
}
}
+/// Sets a slice to zeroes.
+/// Prevents the store from being optimized out.
pub fn secureZero(comptime T: type, s: []T) void {
// NOTE: We do not use a volatile slice cast here since LLVM cannot
// see that it can be replaced by a memset.
@@ -519,7 +700,7 @@ test "mem.secureZero" {
/// Initializes all fields of the struct with their default value, or zero values if no default value is present.
/// If the field is present in the provided initial values, it will have that value instead.
/// Structs are initialized recursively.
-pub fn zeroInit(comptime T: type, init: var) T {
+pub fn zeroInit(comptime T: type, init: anytype) T {
comptime const Init = @TypeOf(init);
switch (@typeInfo(T)) {
@@ -528,6 +709,13 @@ pub fn zeroInit(comptime T: type, init: var) T {
.Struct => |init_info| {
var value = std.mem.zeroes(T);
+ if (init_info.is_tuple) {
+ inline for (init_info.fields) |field, i| {
+ @field(value, struct_info.fields[i].name) = @field(init, field.name);
+ }
+ return value;
+ }
+
inline for (init_info.fields) |field| {
if (!@hasField(T, field.name)) {
@compileError("Encountered an initializer for `" ++ field.name ++ "`, but it is not a field of " ++ @typeName(T));
@@ -544,8 +732,8 @@ pub fn zeroInit(comptime T: type, init: var) T {
@field(value, field.name) = @field(init, field.name);
},
}
- } else if (field.default_value != null) {
- @field(value, field.name) = field.default_value;
+ } else if (field.default_value) |default_value| {
+ @field(value, field.name) = default_value;
}
}
@@ -572,24 +760,40 @@ test "zeroInit" {
b: ?bool,
c: I,
e: [3]u8,
- f: i64,
+ f: i64 = -1,
};
const s = zeroInit(S, .{
.a = 42,
});
- testing.expectEqual(s, S{
+ testing.expectEqual(S{
.a = 42,
.b = null,
.c = .{
.d = 0,
},
.e = [3]u8{ 0, 0, 0 },
- .f = 0,
- });
+ .f = -1,
+ }, s);
+
+ const Color = struct {
+ r: u8,
+ g: u8,
+ b: u8,
+ a: u8,
+ };
+
+ const c = zeroInit(Color, .{ 255, 255 });
+ testing.expectEqual(Color{
+ .r = 255,
+ .g = 255,
+ .b = 0,
+ .a = 0,
+ }, c);
}
+/// Compares two slices of numbers lexicographically. O(n).
pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order {
const n = math.min(lhs.len, rhs.len);
var i: usize = 0;
@@ -719,7 +923,7 @@ test "Span" {
///
/// When there is both a sentinel and an array length or slice length, the
/// length value is used instead of the sentinel.
-pub fn span(ptr: var) Span(@TypeOf(ptr)) {
+pub fn span(ptr: anytype) Span(@TypeOf(ptr)) {
if (@typeInfo(@TypeOf(ptr)) == .Optional) {
if (ptr) |non_null| {
return span(non_null);
@@ -747,7 +951,7 @@ test "span" {
/// Same as `span`, except when there is both a sentinel and an array
/// length or slice length, scans the memory for the sentinel value
/// rather than using the length.
-pub fn spanZ(ptr: var) Span(@TypeOf(ptr)) {
+pub fn spanZ(ptr: anytype) Span(@TypeOf(ptr)) {
if (@typeInfo(@TypeOf(ptr)) == .Optional) {
if (ptr) |non_null| {
return spanZ(non_null);
@@ -776,7 +980,7 @@ test "spanZ" {
/// or a slice, and returns the length.
/// In the case of a sentinel-terminated array, it uses the array length.
/// For C pointers it assumes it is a pointer-to-many with a 0 sentinel.
-pub fn len(value: var) usize {
+pub fn len(value: anytype) usize {
return switch (@typeInfo(@TypeOf(value))) {
.Array => |info| info.len,
.Vector => |info| info.len,
@@ -824,7 +1028,7 @@ test "len" {
/// In the case of a sentinel-terminated array, it scans the array
/// for a sentinel and uses that for the length, rather than using the array length.
/// For C pointers it assumes it is a pointer-to-many with a 0 sentinel.
-pub fn lenZ(ptr: var) usize {
+pub fn lenZ(ptr: anytype) usize {
return switch (@typeInfo(@TypeOf(ptr))) {
.Array => |info| if (info.sentinel) |sentinel|
indexOfSentinel(info.child, sentinel, &ptr)
@@ -1492,12 +1696,23 @@ pub const SplitIterator = struct {
/// Naively combines a series of slices with a separator.
/// Allocates memory for the result, which must be freed by the caller.
pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
+ return joinMaybeZ(allocator, separator, slices, false);
+}
+
+/// Naively combines a series of slices with a separator and null terminator.
+/// Allocates memory for the result, which must be freed by the caller.
+pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
+ const out = try joinMaybeZ(allocator, separator, slices, true);
+ return out[0 .. out.len - 1 :0];
+}
+
+fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
if (slices.len == 0) return &[0]u8{};
const total_len = blk: {
var sum: usize = separator.len * (slices.len - 1);
- for (slices) |slice|
- sum += slice.len;
+ for (slices) |slice| sum += slice.len;
+ if (zero) sum += 1;
break :blk sum;
};
@@ -1513,6 +1728,8 @@ pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []cons
buf_index += slice.len;
}
+ if (zero) buf[buf.len - 1] = 0;
+
// No need for shrink since buf is exactly the correct size.
return buf;
}
@@ -1535,6 +1752,27 @@ test "mem.join" {
}
}
+test "mem.joinZ" {
+ {
+ const str = try joinZ(testing.allocator, ",", &[_][]const u8{ "a", "b", "c" });
+ defer testing.allocator.free(str);
+ testing.expect(eql(u8, str, "a,b,c"));
+ testing.expectEqual(str[str.len], 0);
+ }
+ {
+ const str = try joinZ(testing.allocator, ",", &[_][]const u8{"a"});
+ defer testing.allocator.free(str);
+ testing.expect(eql(u8, str, "a"));
+ testing.expectEqual(str[str.len], 0);
+ }
+ {
+ const str = try joinZ(testing.allocator, ",", &[_][]const u8{ "a", "", "b", "", "c" });
+ defer testing.allocator.free(str);
+ testing.expect(eql(u8, str, "a,,b,,c"));
+ testing.expectEqual(str[str.len], 0);
+ }
+}
+
/// Copies each T from slices into a new slice that exactly holds all the elements.
pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
if (slices.len == 0) return &[0]T{};
@@ -1727,6 +1965,8 @@ fn testWriteIntImpl() void {
}));
}
+/// Returns the smallest number in a slice. O(n).
+/// `slice` must not be empty.
pub fn min(comptime T: type, slice: []const T) T {
var best = slice[0];
for (slice[1..]) |item| {
@@ -1739,6 +1979,8 @@ test "mem.min" {
testing.expect(min(u8, "abcdefg") == 'a');
}
+/// Returns the largest number in a slice. O(n).
+/// `slice` must not be empty.
pub fn max(comptime T: type, slice: []const T) T {
var best = slice[0];
for (slice[1..]) |item| {
@@ -1855,7 +2097,7 @@ fn AsBytesReturnType(comptime P: type) type {
}
/// Given a pointer to a single item, returns a slice of the underlying bytes, preserving constness.
-pub fn asBytes(ptr: var) AsBytesReturnType(@TypeOf(ptr)) {
+pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) {
const P = @TypeOf(ptr);
return @ptrCast(AsBytesReturnType(P), ptr);
}
@@ -1894,8 +2136,8 @@ test "asBytes" {
testing.expect(eql(u8, asBytes(&zero), ""));
}
-///Given any value, returns a copy of its bytes in an array.
-pub fn toBytes(value: var) [@sizeOf(@TypeOf(value))]u8 {
+/// Given any value, returns a copy of its bytes in an array.
+pub fn toBytes(value: anytype) [@sizeOf(@TypeOf(value))]u8 {
return asBytes(&value).*;
}
@@ -1928,9 +2170,9 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type {
return if (comptime trait.isConstPtr(B)) *align(alignment) const T else *align(alignment) T;
}
-///Given a pointer to an array of bytes, returns a pointer to a value of the specified type
+/// Given a pointer to an array of bytes, returns a pointer to a value of the specified type
/// backed by those bytes, preserving constness.
-pub fn bytesAsValue(comptime T: type, bytes: var) BytesAsValueReturnType(T, @TypeOf(bytes)) {
+pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) {
return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes);
}
@@ -1971,9 +2213,9 @@ test "bytesAsValue" {
testing.expect(meta.eql(inst, inst2.*));
}
-///Given a pointer to an array of bytes, returns a value of the specified type backed by a
+/// Given a pointer to an array of bytes, returns a value of the specified type backed by a
/// copy of those bytes.
-pub fn bytesToValue(comptime T: type, bytes: var) T {
+pub fn bytesToValue(comptime T: type, bytes: anytype) T {
return bytesAsValue(T, bytes).*;
}
test "bytesToValue" {
@@ -2001,7 +2243,7 @@ fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type {
return if (trait.isConstPtr(bytesType)) []align(alignment) const T else []align(alignment) T;
}
-pub fn bytesAsSlice(comptime T: type, bytes: var) BytesAsSliceReturnType(T, @TypeOf(bytes)) {
+pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, @TypeOf(bytes)) {
// let's not give an undefined pointer to @ptrCast
// it may be equal to zero and fail a null check
if (bytes.len == 0) {
@@ -2080,7 +2322,7 @@ fn SliceAsBytesReturnType(comptime sliceType: type) type {
return if (trait.isConstPtr(sliceType)) []align(alignment) const u8 else []align(alignment) u8;
}
-pub fn sliceAsBytes(slice: var) SliceAsBytesReturnType(@TypeOf(slice)) {
+pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) {
const Slice = @TypeOf(slice);
// let's not give an undefined pointer to @ptrCast
@@ -2191,6 +2433,15 @@ test "alignForward" {
}
/// Round an address up to the previous aligned address
+/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
+pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
+ if (@popCount(usize, alignment) == 1)
+ return alignBackward(i, alignment);
+ assert(alignment != 0);
+ return i - @mod(i, alignment);
+}
+
+/// Round an address up to the previous aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize {
return alignBackwardGeneric(usize, addr, alignment);
@@ -2206,6 +2457,19 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
return addr & ~(alignment - 1);
}
+/// Returns whether `alignment` is a valid alignment, meaning it is
+/// a positive power of 2.
+pub fn isValidAlign(alignment: u29) bool {
+ return @popCount(u29, alignment) == 1;
+}
+
+pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
+ if (@popCount(usize, alignment) == 1)
+ return isAligned(i, alignment);
+ assert(alignment != 0);
+ return 0 == @mod(i, alignment);
+}
+
/// Given an address and an alignment, return true if the address is a multiple of the alignment
/// The alignment must be a power of 2 and greater than 0.
pub fn isAligned(addr: usize, alignment: usize) bool {
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index 75f507da74..6340a44fae 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -6,10 +6,11 @@ const math = std.math;
const testing = std.testing;
pub const trait = @import("meta/trait.zig");
+pub const TrailerFlags = @import("meta/trailer_flags.zig").TrailerFlags;
const TypeInfo = builtin.TypeInfo;
-pub fn tagName(v: var) []const u8 {
+pub fn tagName(v: anytype) []const u8 {
const T = @TypeOf(v);
switch (@typeInfo(T)) {
.ErrorSet => return @errorName(v),
@@ -250,7 +251,7 @@ test "std.meta.containerLayout" {
testing.expect(containerLayout(U3) == .Extern);
}
-pub fn declarations(comptime T: type) []TypeInfo.Declaration {
+pub fn declarations(comptime T: type) []const TypeInfo.Declaration {
return switch (@typeInfo(T)) {
.Struct => |info| info.decls,
.Enum => |info| info.decls,
@@ -274,7 +275,7 @@ test "std.meta.declarations" {
fn a() void {}
};
- const decls = comptime [_][]TypeInfo.Declaration{
+ const decls = comptime [_][]const TypeInfo.Declaration{
declarations(E1),
declarations(S1),
declarations(U1),
@@ -323,10 +324,10 @@ test "std.meta.declarationInfo" {
}
pub fn fields(comptime T: type) switch (@typeInfo(T)) {
- .Struct => []TypeInfo.StructField,
- .Union => []TypeInfo.UnionField,
- .ErrorSet => []TypeInfo.Error,
- .Enum => []TypeInfo.EnumField,
+ .Struct => []const TypeInfo.StructField,
+ .Union => []const TypeInfo.UnionField,
+ .ErrorSet => []const TypeInfo.Error,
+ .Enum => []const TypeInfo.EnumField,
else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"),
} {
return switch (@typeInfo(T)) {
@@ -430,7 +431,7 @@ test "std.meta.TagType" {
}
///Returns the active tag of a tagged union
-pub fn activeTag(u: var) @TagType(@TypeOf(u)) {
+pub fn activeTag(u: anytype) @TagType(@TypeOf(u)) {
const T = @TypeOf(u);
return @as(@TagType(T), u);
}
@@ -480,7 +481,7 @@ test "std.meta.TagPayloadType" {
/// Compares two of any type for equality. Containers are compared on a field-by-field basis,
/// where possible. Pointers are not followed.
-pub fn eql(a: var, b: @TypeOf(a)) bool {
+pub fn eql(a: anytype, b: @TypeOf(a)) bool {
const T = @TypeOf(a);
switch (@typeInfo(T)) {
@@ -627,7 +628,7 @@ test "intToEnum with error return" {
pub const IntToEnumError = error{InvalidEnumTag};
-pub fn intToEnum(comptime Tag: type, tag_int: var) IntToEnumError!Tag {
+pub fn intToEnum(comptime Tag: type, tag_int: anytype) IntToEnumError!Tag {
inline for (@typeInfo(Tag).Enum.fields) |f| {
const this_tag_value = @field(Tag, f.name);
if (tag_int == @enumToInt(this_tag_value)) {
@@ -693,3 +694,85 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
},
});
}
+
+/// Given a type and value, cast the value to the type as c would.
+/// This is for translate-c and is not intended for general use.
+pub fn cast(comptime DestType: type, target: anytype) DestType {
+ const TargetType = @TypeOf(target);
+ switch (@typeInfo(DestType)) {
+ .Pointer => {
+ switch (@typeInfo(TargetType)) {
+ .Int, .ComptimeInt => {
+ return @intToPtr(DestType, target);
+ },
+ .Pointer => |ptr| {
+ return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ },
+ .Optional => |opt| {
+ if (@typeInfo(opt.child) == .Pointer) {
+ return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target));
+ }
+ },
+ else => {},
+ }
+ },
+ .Optional => |opt| {
+ if (@typeInfo(opt.child) == .Pointer) {
+ switch (@typeInfo(TargetType)) {
+ .Int, .ComptimeInt => {
+ return @intToPtr(DestType, target);
+ },
+ .Pointer => |ptr| {
+ return @ptrCast(DestType, @alignCast(ptr.alignment, target));
+ },
+ .Optional => |target_opt| {
+ if (@typeInfo(target_opt.child) == .Pointer) {
+ return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target));
+ }
+ },
+ else => {},
+ }
+ }
+ },
+ .Enum, .EnumLiteral => {
+ if (@typeInfo(TargetType) == .Int or @typeInfo(TargetType) == .ComptimeInt) {
+ return @intToEnum(DestType, target);
+ }
+ },
+ .Int, .ComptimeInt => {
+ switch (@typeInfo(TargetType)) {
+ .Pointer => {
+ return @as(DestType, @ptrToInt(target));
+ },
+ .Optional => |opt| {
+ if (@typeInfo(opt.child) == .Pointer) {
+ return @as(DestType, @ptrToInt(target));
+ }
+ },
+ .Enum, .EnumLiteral => {
+ return @as(DestType, @enumToInt(target));
+ },
+ else => {},
+ }
+ },
+ else => {},
+ }
+ return @as(DestType, target);
+}
+
+test "std.meta.cast" {
+ const E = enum(u2) {
+ Zero,
+ One,
+ Two,
+ };
+
+ var i = @as(i64, 10);
+
+ testing.expect(cast(?*c_void, 0) == @intToPtr(?*c_void, 0));
+ testing.expect(cast(*u8, 16) == @intToPtr(*u8, 16));
+ testing.expect(cast(u64, @as(u32, 10)) == @as(u64, 10));
+ testing.expect(cast(E, 1) == .One);
+ testing.expect(cast(u8, E.Two) == 2);
+ testing.expect(cast(*u64, &i).* == @as(u64, 10));
+}
diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig
new file mode 100644
index 0000000000..0da7ca815a
--- /dev/null
+++ b/lib/std/meta/trailer_flags.zig
@@ -0,0 +1,145 @@
+const std = @import("../std.zig");
+const meta = std.meta;
+const testing = std.testing;
+const mem = std.mem;
+const assert = std.debug.assert;
+
+/// This is useful for saving memory when allocating an object that has many
+/// optional components. The optional objects are allocated sequentially in
+/// memory, and a single integer is used to represent each optional object
+/// and whether it is present based on each corresponding bit.
+pub fn TrailerFlags(comptime Fields: type) type {
+ return struct {
+ bits: Int,
+
+ pub const Int = @Type(.{ .Int = .{ .bits = bit_count, .is_signed = false } });
+ pub const bit_count = @typeInfo(Fields).Struct.fields.len;
+
+ pub const Self = @This();
+
+ pub fn has(self: Self, comptime name: []const u8) bool {
+ const field_index = meta.fieldIndex(Fields, name).?;
+ return (self.bits & (1 << field_index)) != 0;
+ }
+
+ pub fn get(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime name: []const u8) ?Field(name) {
+ if (!self.has(name))
+ return null;
+ return self.ptrConst(p, name).*;
+ }
+
+ pub fn setFlag(self: *Self, comptime name: []const u8) void {
+ const field_index = meta.fieldIndex(Fields, name).?;
+ self.bits |= 1 << field_index;
+ }
+
+ /// `fields` is a struct with each field set to an optional value.
+ /// Missing fields are assumed to be `null`.
+ /// Only the non-null bits are observed and are used to set the flag bits.
+ pub fn init(fields: anytype) Self {
+ var self: Self = .{ .bits = 0 };
+ inline for (@typeInfo(@TypeOf(fields)).Struct.fields) |field| {
+ const opt: ?Field(field.name) = @field(fields, field.name);
+ const field_index = meta.fieldIndex(Fields, field.name).?;
+ self.bits |= @as(Int, @boolToInt(opt != null)) << field_index;
+ }
+ return self;
+ }
+
+ /// `fields` is a struct with each field set to an optional value (same as `init`).
+ /// Missing fields are assumed to be `null`.
+ pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: anytype) void {
+ inline for (@typeInfo(@TypeOf(fields)).Struct.fields) |field| {
+ const opt: ?Field(field.name) = @field(fields, field.name);
+ if (opt) |value| {
+ self.set(p, field.name, value);
+ }
+ }
+ }
+
+ pub fn set(
+ self: Self,
+ p: [*]align(@alignOf(Fields)) u8,
+ comptime name: []const u8,
+ value: Field(name),
+ ) void {
+ self.ptr(p, name).* = value;
+ }
+
+ pub fn ptr(self: Self, p: [*]align(@alignOf(Fields)) u8, comptime name: []const u8) *Field(name) {
+ if (@sizeOf(Field(name)) == 0)
+ return undefined;
+ const off = self.offset(p, name);
+ return @ptrCast(*Field(name), @alignCast(@alignOf(Field(name)), p + off));
+ }
+
+ pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime name: []const u8) *const Field(name) {
+ if (@sizeOf(Field(name)) == 0)
+ return undefined;
+ const off = self.offset(p, name);
+ return @ptrCast(*const Field(name), @alignCast(@alignOf(Field(name)), p + off));
+ }
+
+ pub fn offset(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime name: []const u8) usize {
+ var off: usize = 0;
+ inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ const active = (self.bits & (1 << i)) != 0;
+ if (comptime mem.eql(u8, field.name, name)) {
+ assert(active);
+ return mem.alignForwardGeneric(usize, off, @alignOf(field.field_type));
+ } else if (active) {
+ off = mem.alignForwardGeneric(usize, off, @alignOf(field.field_type));
+ off += @sizeOf(field.field_type);
+ }
+ }
+ @compileError("no field named " ++ name ++ " in type " ++ @typeName(Fields));
+ }
+
+ pub fn Field(comptime name: []const u8) type {
+ return meta.fieldInfo(Fields, name).field_type;
+ }
+
+ pub fn sizeInBytes(self: Self) usize {
+ var off: usize = 0;
+ inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ if (@sizeOf(field.field_type) == 0)
+ continue;
+ if ((self.bits & (1 << i)) != 0) {
+ off = mem.alignForwardGeneric(usize, off, @alignOf(field.field_type));
+ off += @sizeOf(field.field_type);
+ }
+ }
+ return off;
+ }
+ };
+}
+
+test "TrailerFlags" {
+ const Flags = TrailerFlags(struct {
+ a: i32,
+ b: bool,
+ c: u64,
+ });
+ var flags = Flags.init(.{
+ .b = true,
+ .c = 1234,
+ });
+ const slice = try testing.allocator.allocAdvanced(u8, 8, flags.sizeInBytes(), .exact);
+ defer testing.allocator.free(slice);
+
+ flags.set(slice.ptr, "b", false);
+ flags.set(slice.ptr, "c", 12345678);
+
+ testing.expect(flags.get(slice.ptr, "a") == null);
+ testing.expect(!flags.get(slice.ptr, "b").?);
+ testing.expect(flags.get(slice.ptr, "c").? == 12345678);
+
+ flags.setMany(slice.ptr, .{
+ .b = true,
+ .c = 5678,
+ });
+
+ testing.expect(flags.get(slice.ptr, "a") == null);
+ testing.expect(flags.get(slice.ptr, "b").?);
+ testing.expect(flags.get(slice.ptr, "c").? == 5678);
+}
diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig
index 11aa8457ee..9e5b080e85 100644
--- a/lib/std/meta/trait.zig
+++ b/lib/std/meta/trait.zig
@@ -9,7 +9,7 @@ const meta = @import("../meta.zig");
pub const TraitFn = fn (type) bool;
-pub fn multiTrait(comptime traits: var) TraitFn {
+pub fn multiTrait(comptime traits: anytype) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
inline for (traits) |t|
@@ -342,7 +342,20 @@ test "std.meta.trait.isContainer" {
testing.expect(!isContainer(u8));
}
-pub fn hasDecls(comptime T: type, comptime names: var) bool {
+pub fn isTuple(comptime T: type) bool {
+ return is(.Struct)(T) and @typeInfo(T).Struct.is_tuple;
+}
+
+test "std.meta.trait.isTuple" {
+ const t1 = struct {};
+ const t2 = .{ .a = 0 };
+ const t3 = .{ 1, 2, 3 };
+ testing.expect(!isTuple(t1));
+ testing.expect(!isTuple(@TypeOf(t2)));
+ testing.expect(isTuple(@TypeOf(t3)));
+}
+
+pub fn hasDecls(comptime T: type, comptime names: anytype) bool {
inline for (names) |name| {
if (!@hasDecl(T, name))
return false;
@@ -368,7 +381,7 @@ test "std.meta.trait.hasDecls" {
testing.expect(!hasDecls(TestStruct2, tuple));
}
-pub fn hasFields(comptime T: type, comptime names: var) bool {
+pub fn hasFields(comptime T: type, comptime names: anytype) bool {
inline for (names) |name| {
if (!@hasField(T, name))
return false;
@@ -394,7 +407,7 @@ test "std.meta.trait.hasFields" {
testing.expect(!hasFields(TestStruct2, .{ "a", "b", "useless" }));
}
-pub fn hasFunctions(comptime T: type, comptime names: var) bool {
+pub fn hasFunctions(comptime T: type, comptime names: anytype) bool {
inline for (names) |name| {
if (!hasFn(name)(T))
return false;
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 229731b617..71bab383fa 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -427,7 +427,7 @@ pub const Address = extern union {
self: Address,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) !void {
switch (self.any.family) {
os.AF_INET => {
@@ -682,7 +682,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
if (info.canonname) |n| {
if (result.canon_name == null) {
- result.canon_name = try mem.dupe(arena, u8, mem.spanZ(n));
+ result.canon_name = try arena.dupe(u8, mem.spanZ(n));
}
}
i += 1;
@@ -1404,8 +1404,8 @@ fn resMSendRc(
fn dnsParse(
r: []const u8,
- ctx: var,
- comptime callback: var,
+ ctx: anytype,
+ comptime callback: anytype,
) !void {
// This implementation is ported from musl libc.
// A more idiomatic "ziggy" implementation would be welcome.
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 0558390b9e..dfb47208ca 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -300,6 +300,10 @@ pub const ReadError = error{
/// This error occurs when no global event loop is configured,
/// and reading from the file descriptor would block.
WouldBlock,
+
+ /// In WASI, this error occurs when the file descriptor does
+ /// not hold the required rights to read from it.
+ AccessDenied,
} || UnexpectedError;
/// Returns the number of bytes that were read, which can be less than
@@ -335,6 +339,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
wasi.ENOMEM => return error.SystemResources,
wasi.ECONNRESET => return error.ConnectionResetByPeer,
wasi.ETIMEDOUT => return error.ConnectionTimedOut,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -402,6 +407,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
wasi.EISDIR => return error.IsDir,
wasi.ENOBUFS => return error.SystemResources,
wasi.ENOMEM => return error.SystemResources,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -466,6 +472,7 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
wasi.ENXIO => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.EOVERFLOW => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -500,8 +507,11 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
pub const TruncateError = error{
FileTooBig,
InputOutput,
- CannotTruncate,
FileBusy,
+
+ /// In WASI, this error occurs when the file descriptor does
+ /// not hold the required rights to call `ftruncate` on it.
+ AccessDenied,
} || UnexpectedError;
pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
@@ -522,7 +532,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
switch (rc) {
.SUCCESS => return,
.INVALID_HANDLE => unreachable, // Handle not open for writing
- .ACCESS_DENIED => return error.CannotTruncate,
+ .ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
}
@@ -532,10 +542,11 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
wasi.EINTR => unreachable,
wasi.EFBIG => return error.FileTooBig,
wasi.EIO => return error.InputOutput,
- wasi.EPERM => return error.CannotTruncate,
+ wasi.EPERM => return error.AccessDenied,
wasi.ETXTBSY => return error.FileBusy,
wasi.EBADF => unreachable, // Handle not open for writing
wasi.EINVAL => unreachable, // Handle not open for writing
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -554,7 +565,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
EINTR => continue,
EFBIG => return error.FileTooBig,
EIO => return error.InputOutput,
- EPERM => return error.CannotTruncate,
+ EPERM => return error.AccessDenied,
ETXTBSY => return error.FileBusy,
EBADF => unreachable, // Handle not open for writing
EINVAL => unreachable, // Handle not open for writing
@@ -604,6 +615,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
wasi.ENXIO => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.EOVERFLOW => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -641,6 +653,9 @@ pub const WriteError = error{
FileTooBig,
InputOutput,
NoSpaceLeft,
+
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to write to it.
AccessDenied,
BrokenPipe,
SystemResources,
@@ -697,6 +712,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
wasi.ENOSPC => return error.NoSpaceLeft,
wasi.EPERM => return error.AccessDenied,
wasi.EPIPE => return error.BrokenPipe,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -774,6 +790,7 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
wasi.ENOSPC => return error.NoSpaceLeft,
wasi.EPERM => return error.AccessDenied,
wasi.EPIPE => return error.BrokenPipe,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -856,6 +873,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
wasi.ENXIO => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.EOVERFLOW => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -949,6 +967,7 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
wasi.ENXIO => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.EOVERFLOW => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -984,6 +1003,8 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
}
pub const OpenError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to open a new resource relative to it.
AccessDenied,
SymLinkLoop,
ProcessFdQuotaExceeded,
@@ -1113,6 +1134,7 @@ pub fn openatWasi(dir_fd: fd_t, file_path: []const u8, oflags: oflags_t, fdflags
wasi.EPERM => return error.AccessDenied,
wasi.EEXIST => return error.PathAlreadyExists,
wasi.EBUSY => return error.DeviceBusy,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -1499,6 +1521,8 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
}
pub const SymLinkError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to create a new symbolic link relative to it.
AccessDenied,
DiskQuota,
PathAlreadyExists,
@@ -1520,15 +1544,17 @@ pub const SymLinkError = error{
/// If `sym_link_path` exists, it will not be overwritten.
/// See also `symlinkC` and `symlinkW`.
pub fn symlink(target_path: []const u8, sym_link_path: []const u8) SymLinkError!void {
+ if (builtin.os.tag == .wasi) {
+ @compileError("symlink is not supported in WASI; use symlinkat instead");
+ }
if (builtin.os.tag == .windows) {
const target_path_w = try windows.sliceToPrefixedFileW(target_path);
const sym_link_path_w = try windows.sliceToPrefixedFileW(sym_link_path);
return windows.CreateSymbolicLinkW(sym_link_path_w.span().ptr, target_path_w.span().ptr, 0);
- } else {
- const target_path_c = try toPosixPath(target_path);
- const sym_link_path_c = try toPosixPath(sym_link_path);
- return symlinkZ(&target_path_c, &sym_link_path_c);
}
+ const target_path_c = try toPosixPath(target_path);
+ const sym_link_path_c = try toPosixPath(sym_link_path);
+ return symlinkZ(&target_path_c, &sym_link_path_c);
}
pub const symlinkC = @compileError("deprecated: renamed to symlinkZ");
@@ -1561,15 +1587,66 @@ pub fn symlinkZ(target_path: [*:0]const u8, sym_link_path: [*:0]const u8) SymLin
}
}
+/// Similar to `symlink`, however, creates a symbolic link named `sym_link_path` which contains the string
+/// `target_path` **relative** to `newdirfd` directory handle.
+/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
+/// one; the latter case is known as a dangling link.
+/// If `sym_link_path` exists, it will not be overwritten.
+/// See also `symlinkatWasi`, `symlinkatZ` and `symlinkatW`.
pub fn symlinkat(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
+ if (builtin.os.tag == .wasi) {
+ return symlinkatWasi(target_path, newdirfd, sym_link_path);
+ }
+ if (builtin.os.tag == .windows) {
+ const target_path_w = try windows.sliceToPrefixedFileW(target_path);
+ const sym_link_path_w = try windows.sliceToPrefixedFileW(sym_link_path);
+ return symlinkatW(target_path_w.span().ptr, newdirfd, sym_link_path_w.span().ptr);
+ }
const target_path_c = try toPosixPath(target_path);
const sym_link_path_c = try toPosixPath(sym_link_path);
- return symlinkatZ(target_path_c, newdirfd, sym_link_path_c);
+ return symlinkatZ(&target_path_c, newdirfd, &sym_link_path_c);
}
pub const symlinkatC = @compileError("deprecated: renamed to symlinkatZ");
+/// WASI-only. The same as `symlinkat` but targeting WASI.
+/// See also `symlinkat`.
+pub fn symlinkatWasi(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
+ switch (wasi.path_symlink(target_path.ptr, target_path.len, newdirfd, sym_link_path.ptr, sym_link_path.len)) {
+ wasi.ESUCCESS => {},
+ wasi.EFAULT => unreachable,
+ wasi.EINVAL => unreachable,
+ wasi.EACCES => return error.AccessDenied,
+ wasi.EPERM => return error.AccessDenied,
+ wasi.EDQUOT => return error.DiskQuota,
+ wasi.EEXIST => return error.PathAlreadyExists,
+ wasi.EIO => return error.FileSystem,
+ wasi.ELOOP => return error.SymLinkLoop,
+ wasi.ENAMETOOLONG => return error.NameTooLong,
+ wasi.ENOENT => return error.FileNotFound,
+ wasi.ENOTDIR => return error.NotDir,
+ wasi.ENOMEM => return error.SystemResources,
+ wasi.ENOSPC => return error.NoSpaceLeft,
+ wasi.EROFS => return error.ReadOnlyFileSystem,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+/// Windows-only. The same as `symlinkat` except the paths are null-terminated, WTF-16 encoded.
+/// See also `symlinkat`.
+pub fn symlinkatW(target_path: [*:0]const u16, newdirfd: fd_t, sym_link_path: [*:0]const u16) SymLinkError!void {
+ @compileError("TODO implement on Windows");
+}
+
+/// The same as `symlinkat` except the parameters are null-terminated pointers.
+/// See also `symlinkat`.
pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:0]const u8) SymLinkError!void {
+    if (builtin.os.tag == .windows) {
+        const target_path_w = try windows.cStrToPrefixedFileW(target_path);
+        const sym_link_path_w = try windows.cStrToPrefixedFileW(sym_link_path);
+        return symlinkatW(target_path_w.span().ptr, newdirfd, sym_link_path_w.span().ptr); // use the converted WTF-16 path, not the narrow original
+    }
switch (errno(system.symlinkat(target_path, newdirfd, sym_link_path))) {
0 => return,
EFAULT => unreachable,
@@ -1592,6 +1669,9 @@ pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:
pub const UnlinkError = error{
FileNotFound,
+
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to unlink a resource by path relative to it.
AccessDenied,
FileBusy,
FileSystem,
@@ -1613,7 +1693,9 @@ pub const UnlinkError = error{
/// Delete a name and possibly the file it refers to.
/// See also `unlinkC`.
pub fn unlink(file_path: []const u8) UnlinkError!void {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("unlink is not supported in WASI; use unlinkat instead");
+ } else if (builtin.os.tag == .windows) {
const file_path_w = try windows.sliceToPrefixedFileW(file_path);
return windows.DeleteFileW(file_path_w.span().ptr);
} else {
@@ -1670,6 +1752,8 @@ pub fn unlinkat(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!vo
pub const unlinkatC = @compileError("deprecated: renamed to unlinkatZ");
+/// WASI-only. Same as `unlinkat` but targeting WASI.
+/// See also `unlinkat`.
pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void {
const remove_dir = (flags & AT_REMOVEDIR) != 0;
const res = if (remove_dir)
@@ -1691,6 +1775,7 @@ pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatErro
wasi.ENOMEM => return error.SystemResources,
wasi.EROFS => return error.ReadOnlyFileSystem,
wasi.ENOTEMPTY => return error.DirNotEmpty,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
wasi.EINVAL => unreachable, // invalid flags, or pathname has . as last component
wasi.EBADF => unreachable, // always a race condition
@@ -1793,6 +1878,8 @@ pub fn unlinkatW(dirfd: fd_t, sub_path_w: [*:0]const u16, flags: u32) UnlinkatEr
}
const RenameError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to rename a resource by path relative to it.
AccessDenied,
FileBusy,
DiskQuota,
@@ -1816,7 +1903,9 @@ const RenameError = error{
/// Change the name or location of a file.
pub fn rename(old_path: []const u8, new_path: []const u8) RenameError!void {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("rename is not supported in WASI; use renameat instead");
+ } else if (builtin.os.tag == .windows) {
const old_path_w = try windows.sliceToPrefixedFileW(old_path);
const new_path_w = try windows.sliceToPrefixedFileW(new_path);
return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
@@ -1887,7 +1976,8 @@ pub fn renameat(
}
}
-/// Same as `renameat` expect only WASI.
+/// WASI-only. Same as `renameat` except targeting WASI.
+/// See also `renameat`.
pub fn renameatWasi(old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, new_path: []const u8) RenameError!void {
switch (wasi.path_rename(old_dir_fd, old_path.ptr, old_path.len, new_dir_fd, new_path.ptr, new_path.len)) {
wasi.ESUCCESS => return,
@@ -1909,6 +1999,7 @@ pub fn renameatWasi(old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, ne
wasi.ENOTEMPTY => return error.PathAlreadyExists,
wasi.EROFS => return error.ReadOnlyFileSystem,
wasi.EXDEV => return error.RenameAcrossMountPoints,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -2007,23 +2098,6 @@ pub fn renameatW(
}
}
-pub const MakeDirError = error{
- AccessDenied,
- DiskQuota,
- PathAlreadyExists,
- SymLinkLoop,
- LinkQuotaExceeded,
- NameTooLong,
- FileNotFound,
- SystemResources,
- NoSpaceLeft,
- NotDir,
- ReadOnlyFileSystem,
- InvalidUtf8,
- BadPathName,
- NoDevice,
-} || UnexpectedError;
-
pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
if (builtin.os.tag == .windows) {
const sub_dir_path_w = try windows.sliceToPrefixedFileW(sub_dir_path);
@@ -2055,6 +2129,7 @@ pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirErr
wasi.ENOSPC => return error.NoSpaceLeft,
wasi.ENOTDIR => return error.NotDir,
wasi.EROFS => return error.ReadOnlyFileSystem,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -2089,10 +2164,31 @@ pub fn mkdiratW(dir_fd: fd_t, sub_path_w: [*:0]const u16, mode: u32) MakeDirErro
windows.CloseHandle(sub_dir_handle);
}
+pub const MakeDirError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to create a new directory relative to it.
+ AccessDenied,
+ DiskQuota,
+ PathAlreadyExists,
+ SymLinkLoop,
+ LinkQuotaExceeded,
+ NameTooLong,
+ FileNotFound,
+ SystemResources,
+ NoSpaceLeft,
+ NotDir,
+ ReadOnlyFileSystem,
+ InvalidUtf8,
+ BadPathName,
+ NoDevice,
+} || UnexpectedError;
+
/// Create a directory.
/// `mode` is ignored on Windows.
pub fn mkdir(dir_path: []const u8, mode: u32) MakeDirError!void {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("mkdir is not supported in WASI; use mkdirat instead");
+ } else if (builtin.os.tag == .windows) {
const sub_dir_handle = try windows.CreateDirectory(null, dir_path, null);
windows.CloseHandle(sub_dir_handle);
return;
@@ -2145,7 +2241,9 @@ pub const DeleteDirError = error{
/// Deletes an empty directory.
pub fn rmdir(dir_path: []const u8) DeleteDirError!void {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("rmdir is not supported in WASI; use unlinkat instead");
+ } else if (builtin.os.tag == .windows) {
const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
return windows.RemoveDirectoryW(dir_path_w.span().ptr);
} else {
@@ -2194,7 +2292,9 @@ pub const ChangeCurDirError = error{
/// Changes the current working directory of the calling process.
/// `dir_path` is recommended to be a UTF-8 encoded string.
pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("chdir is not supported in WASI");
+ } else if (builtin.os.tag == .windows) {
const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
@compileError("TODO implement chdir for Windows");
} else {
@@ -2246,6 +2346,8 @@ pub fn fchdir(dirfd: fd_t) FchdirError!void {
}
pub const ReadLinkError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to read value of a symbolic link relative to it.
AccessDenied,
FileSystem,
SymLinkLoop,
@@ -2258,9 +2360,11 @@ pub const ReadLinkError = error{
/// Read value of a symbolic link.
/// The return value is a slice of `out_buffer` from index 0.
pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
- if (builtin.os.tag == .windows) {
+ if (builtin.os.tag == .wasi) {
+ @compileError("readlink is not supported in WASI; use readlinkat instead");
+ } else if (builtin.os.tag == .windows) {
const file_path_w = try windows.sliceToPrefixedFileW(file_path);
- @compileError("TODO implement readlink for Windows");
+ return readlinkW(file_path_w.span().ptr, out_buffer);
} else {
const file_path_c = try toPosixPath(file_path);
return readlinkZ(&file_path_c, out_buffer);
@@ -2269,11 +2373,17 @@ pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
pub const readlinkC = @compileError("deprecated: renamed to readlinkZ");
+/// Windows-only. Same as `readlink` except `file_path` is null-terminated, WTF-16 encoded.
+/// See also `readlinkZ`.
+pub fn readlinkW(file_path: [*:0]const u16, out_buffer: []u8) ReadLinkError![]u8 {
+ @compileError("TODO implement readlink for Windows");
+}
+
/// Same as `readlink` except `file_path` is null-terminated.
pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
if (builtin.os.tag == .windows) {
const file_path_w = try windows.cStrToPrefixedFileW(file_path);
- @compileError("TODO implement readlink for Windows");
+ return readlinkW(file_path_w.span().ptr, out_buffer);
}
const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
switch (errno(rc)) {
@@ -2291,12 +2401,55 @@ pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8
}
}
+/// Similar to `readlink` except reads value of a symbolic link **relative** to `dirfd` directory handle.
+/// The return value is a slice of `out_buffer` from index 0.
+/// See also `readlinkatWasi`, `readlinkatZ` and `readlinkatW`.
+pub fn readlinkat(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
+    if (builtin.os.tag == .wasi) {
+        return readlinkatWasi(dirfd, file_path, out_buffer);
+    }
+    if (builtin.os.tag == .windows) {
+        const file_path_w = try windows.sliceToPrefixedFileW(file_path); // file_path is a slice, not a C string
+        return readlinkatW(dirfd, file_path_w.span().ptr, out_buffer);
+    }
+    const file_path_c = try toPosixPath(file_path);
+    return readlinkatZ(dirfd, &file_path_c, out_buffer);
+}
+
pub const readlinkatC = @compileError("deprecated: renamed to readlinkatZ");
+/// WASI-only. Same as `readlinkat` but targets WASI.
+/// See also `readlinkat`.
+pub fn readlinkatWasi(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
+ var bufused: usize = undefined;
+ switch (wasi.path_readlink(dirfd, file_path.ptr, file_path.len, out_buffer.ptr, out_buffer.len, &bufused)) {
+ wasi.ESUCCESS => return out_buffer[0..bufused],
+ wasi.EACCES => return error.AccessDenied,
+ wasi.EFAULT => unreachable,
+ wasi.EINVAL => unreachable,
+ wasi.EIO => return error.FileSystem,
+ wasi.ELOOP => return error.SymLinkLoop,
+ wasi.ENAMETOOLONG => return error.NameTooLong,
+ wasi.ENOENT => return error.FileNotFound,
+ wasi.ENOMEM => return error.SystemResources,
+ wasi.ENOTDIR => return error.NotDir,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+/// Windows-only. Same as `readlinkat` except `file_path` is null-terminated, WTF-16 encoded.
+/// See also `readlinkat`.
+pub fn readlinkatW(dirfd: fd_t, file_path: [*:0]const u16, out_buffer: []u8) ReadLinkError![]u8 {
+ @compileError("TODO implement on Windows");
+}
+
+/// Same as `readlinkat` except `file_path` is null-terminated.
+/// See also `readlinkat`.
pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
if (builtin.os.tag == .windows) {
const file_path_w = try windows.cStrToPrefixedFileW(file_path);
- @compileError("TODO implement readlink for Windows");
+ return readlinkatW(dirfd, file_path_w.span().ptr, out_buffer);
}
const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len);
switch (errno(rc)) {
@@ -2958,9 +3111,13 @@ pub fn waitpid(pid: i32, flags: u32) u32 {
pub const FStatError = error{
SystemResources,
+
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to get its filestat information.
AccessDenied,
} || UnexpectedError;
+/// Return information about a file descriptor.
pub fn fstat(fd: fd_t) FStatError!Stat {
if (builtin.os.tag == .wasi) {
var stat: wasi.filestat_t = undefined;
@@ -2970,9 +3127,13 @@ pub fn fstat(fd: fd_t) FStatError!Stat {
wasi.EBADF => unreachable, // Always a race condition.
wasi.ENOMEM => return error.SystemResources,
wasi.EACCES => return error.AccessDenied,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
+ if (builtin.os.tag == .windows) {
+ @compileError("fstat is not yet implemented on Windows");
+ }
var stat: Stat = undefined;
switch (errno(system.fstat(fd, &stat))) {
@@ -2987,13 +3148,43 @@ pub fn fstat(fd: fd_t) FStatError!Stat {
pub const FStatAtError = FStatError || error{ NameTooLong, FileNotFound };
+/// Similar to `fstat`, but returns stat of a resource pointed to by `pathname`
+/// which is relative to `dirfd` handle.
+/// See also `fstatatZ` and `fstatatWasi`.
pub fn fstatat(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
- const pathname_c = try toPosixPath(pathname);
- return fstatatZ(dirfd, &pathname_c, flags);
+ if (builtin.os.tag == .wasi) {
+ return fstatatWasi(dirfd, pathname, flags);
+ } else if (builtin.os.tag == .windows) {
+ @compileError("fstatat is not yet implemented on Windows");
+ } else {
+ const pathname_c = try toPosixPath(pathname);
+ return fstatatZ(dirfd, &pathname_c, flags);
+ }
}
pub const fstatatC = @compileError("deprecated: renamed to fstatatZ");
+/// WASI-only. Same as `fstatat` but targeting WASI.
+/// See also `fstatat`.
+pub fn fstatatWasi(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
+ var stat: wasi.filestat_t = undefined;
+ switch (wasi.path_filestat_get(dirfd, flags, pathname.ptr, pathname.len, &stat)) {
+ wasi.ESUCCESS => return Stat.fromFilestat(stat),
+ wasi.EINVAL => unreachable,
+ wasi.EBADF => unreachable, // Always a race condition.
+ wasi.ENOMEM => return error.SystemResources,
+ wasi.EACCES => return error.AccessDenied,
+ wasi.EFAULT => unreachable,
+ wasi.ENAMETOOLONG => return error.NameTooLong,
+ wasi.ENOENT => return error.FileNotFound,
+ wasi.ENOTDIR => return error.FileNotFound,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
+ else => |err| return unexpectedErrno(err),
+ }
+}
+
+/// Same as `fstatat` but `pathname` is null-terminated.
+/// See also `fstatat`.
pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!Stat {
var stat: Stat = undefined;
switch (errno(system.fstatat(dirfd, pathname, &stat, flags))) {
@@ -3493,7 +3684,13 @@ pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) void {
}
}
-pub const SeekError = error{Unseekable} || UnexpectedError;
+pub const SeekError = error{
+ Unseekable,
+
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to seek on it.
+ AccessDenied,
+} || UnexpectedError;
/// Repositions read/write file offset relative to the beginning.
pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
@@ -3521,6 +3718,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
wasi.EOVERFLOW => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.ENXIO => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -3562,6 +3760,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
wasi.EOVERFLOW => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.ENXIO => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -3602,6 +3801,7 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
wasi.EOVERFLOW => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.ENXIO => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -3642,6 +3842,7 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
wasi.EOVERFLOW => return error.Unseekable,
wasi.ESPIPE => return error.Unseekable,
wasi.ENXIO => return error.Unseekable,
+ wasi.ENOTCAPABLE => return error.AccessDenied,
else => |err| return unexpectedErrno(err),
}
}
@@ -3867,7 +4068,7 @@ pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
}
pub fn dl_iterate_phdr(
- context: var,
+ context: anytype,
comptime Error: type,
comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void,
) Error!void {
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index cc3b4f5741..e508f5ae20 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -18,135 +18,49 @@ const AtomicOrder = builtin.AtomicOrder;
const tmpDir = std.testing.tmpDir;
const Dir = std.fs.Dir;
-test "makePath, put some files in it, deleteTree" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- try tmp.dir.makePath("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c");
- try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c" ++ fs.path.sep_str ++ "file.txt", "nonsense");
- try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
- try tmp.dir.deleteTree("os_test_tmp");
- if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
- @panic("expected error");
- } else |err| {
- expect(err == error.FileNotFound);
- }
-}
-
-test "access file" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
+test "fstatat" {
+ // enable when `fstat` and `fstatat` are implemented on Windows
+ if (builtin.os.tag == .windows) return error.SkipZigTest;
- try tmp.dir.makePath("os_test_tmp");
- if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
- @panic("expected error");
- } else |err| {
- expect(err == error.FileNotFound);
- }
-
- try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", "");
- try tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{});
- try tmp.dir.deleteTree("os_test_tmp");
-}
-
-fn testThreadIdFn(thread_id: *Thread.Id) void {
- thread_id.* = Thread.getCurrentId();
-}
-
-test "sendfile" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makePath("os_test_tmp");
- defer tmp.dir.deleteTree("os_test_tmp") catch {};
-
- var dir = try tmp.dir.openDir("os_test_tmp", .{});
- defer dir.close();
-
- const line1 = "line1\n";
- const line2 = "second line\n";
- var vecs = [_]os.iovec_const{
- .{
- .iov_base = line1,
- .iov_len = line1.len,
- },
- .{
- .iov_base = line2,
- .iov_len = line2.len,
- },
- };
+ // create dummy file
+ const contents = "nonsense";
+ try tmp.dir.writeFile("file.txt", contents);
- var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
- defer src_file.close();
-
- try src_file.writevAll(&vecs);
-
- var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
- defer dest_file.close();
-
- const header1 = "header1\n";
- const header2 = "second header\n";
- const trailer1 = "trailer1\n";
- const trailer2 = "second trailer\n";
- var hdtr = [_]os.iovec_const{
- .{
- .iov_base = header1,
- .iov_len = header1.len,
- },
- .{
- .iov_base = header2,
- .iov_len = header2.len,
- },
- .{
- .iov_base = trailer1,
- .iov_len = trailer1.len,
- },
- .{
- .iov_base = trailer2,
- .iov_len = trailer2.len,
- },
- };
+ // fetch file's info on the opened fd directly
+ const file = try tmp.dir.openFile("file.txt", .{});
+ const stat = try os.fstat(file.handle);
+ defer file.close();
- var written_buf: [100]u8 = undefined;
- try dest_file.writeFileAll(src_file, .{
- .in_offset = 1,
- .in_len = 10,
- .headers_and_trailers = &hdtr,
- .header_count = 2,
- });
- const amt = try dest_file.preadAll(&written_buf, 0);
- expect(mem.eql(u8, written_buf[0..amt], "header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n"));
+ // now repeat but using `fstatat` instead
+ const flags = if (builtin.os.tag == .wasi) 0x0 else os.AT_SYMLINK_NOFOLLOW;
+ const statat = try os.fstatat(tmp.dir.fd, "file.txt", flags);
+ expectEqual(stat, statat);
}
-test "fs.copyFile" {
- const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
- const src_file = "tmp_test_copy_file.txt";
- const dest_file = "tmp_test_copy_file2.txt";
- const dest_file2 = "tmp_test_copy_file3.txt";
+test "readlinkat" {
+ // enable when `readlinkat` and `symlinkat` are implemented on Windows
+ if (builtin.os.tag == .windows) return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.writeFile(src_file, data);
- defer tmp.dir.deleteFile(src_file) catch {};
+ // create file
+ try tmp.dir.writeFile("file.txt", "nonsense");
- try tmp.dir.copyFile(src_file, tmp.dir, dest_file, .{});
- defer tmp.dir.deleteFile(dest_file) catch {};
+ // create a symbolic link
+ try os.symlinkat("file.txt", tmp.dir.fd, "link");
- try tmp.dir.copyFile(src_file, tmp.dir, dest_file2, .{ .override_mode = File.default_mode });
- defer tmp.dir.deleteFile(dest_file2) catch {};
-
- try expectFileContents(tmp.dir, dest_file, data);
- try expectFileContents(tmp.dir, dest_file2, data);
+ // read the link
+ var buffer: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const read_link = try os.readlinkat(tmp.dir.fd, "link", buffer[0..]);
+ expect(mem.eql(u8, "file.txt", read_link));
}
-fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
- const contents = try dir.readFileAlloc(testing.allocator, file_path, 1000);
- defer testing.allocator.free(contents);
-
- testing.expectEqualSlices(u8, data, contents);
+fn testThreadIdFn(thread_id: *Thread.Id) void {
+ thread_id.* = Thread.getCurrentId();
}
test "std.Thread.getCurrentId" {
@@ -201,29 +115,6 @@ test "cpu count" {
expect(cpu_count >= 1);
}
-test "AtomicFile" {
- const test_out_file = "tmp_atomic_file_test_dest.txt";
- const test_content =
- \\ hello!
- \\ this is a test file
- ;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- {
- var af = try tmp.dir.atomicFile(test_out_file, .{});
- defer af.deinit();
- try af.file.writeAll(test_content);
- try af.finish();
- }
- const content = try tmp.dir.readFileAlloc(testing.allocator, test_out_file, 9999);
- defer testing.allocator.free(content);
- expect(mem.eql(u8, content, test_content));
-
- try tmp.dir.deleteFile(test_out_file);
-}
-
test "thread local storage" {
if (builtin.single_threaded) return error.SkipZigTest;
const thread1 = try Thread.spawn({}, testTls);
@@ -258,13 +149,6 @@ test "getcwd" {
_ = os.getcwd(&buf) catch undefined;
}
-test "realpath" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- testing.expectError(error.FileNotFound, fs.realpath("definitely_bogus_does_not_exist1234", &buf));
-}
-
test "sigaltstack" {
if (builtin.os.tag == .windows or builtin.os.tag == .wasi) return error.SkipZigTest;
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig
index 1d2f88794d..c037075cd8 100644
--- a/lib/std/os/uefi.zig
+++ b/lib/std/os/uefi.zig
@@ -28,7 +28,7 @@ pub const Guid = extern struct {
self: @This(),
comptime f: []const u8,
options: std.fmt.FormatOptions,
- out_stream: var,
+ out_stream: anytype,
) Errors!void {
if (f.len == 0) {
return std.fmt.format(out_stream, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 953a16a2ea..1ed1ef1f54 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -901,7 +901,13 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA {
var wsadata: ws2_32.WSADATA = undefined;
return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) {
0 => wsadata,
- else => |err| unexpectedWSAError(@intToEnum(ws2_32.WinsockError, @intCast(u16, err))),
+ else => |err_int| switch (@intToEnum(ws2_32.WinsockError, @intCast(u16, err_int))) {
+ .WSASYSNOTREADY => return error.SystemNotAvailable,
+ .WSAVERNOTSUPPORTED => return error.VersionNotSupported,
+ .WSAEINPROGRESS => return error.BlockingOperationInProgress,
+ .WSAEPROCLIM => return error.SystemResources,
+ else => |err| return unexpectedWSAError(err),
+ },
};
}
@@ -909,6 +915,9 @@ pub fn WSACleanup() !void {
return switch (ws2_32.WSACleanup()) {
0 => {},
ws2_32.SOCKET_ERROR => switch (ws2_32.WSAGetLastError()) {
+ .WSANOTINITIALISED => return error.NotInitialized,
+ .WSAENETDOWN => return error.NetworkNotAvailable,
+ .WSAEINPROGRESS => return error.BlockingOperationInProgress,
else => |err| return unexpectedWSAError(err),
},
else => unreachable,
diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig
index 27f949227a..203c9d466a 100644
--- a/lib/std/os/windows/bits.zig
+++ b/lib/std/os/windows/bits.zig
@@ -593,6 +593,7 @@ pub const FILE_CURRENT = 1;
pub const FILE_END = 2;
pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000;
+pub const HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010;
pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004;
pub const HEAP_NO_SERIALIZE = 0x00000001;
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index 8b16f54361..1e36a72038 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -163,16 +163,16 @@ pub const IPPROTO_UDP = 17;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_RM = 113;
-pub const AI_PASSIVE = 0x00001;
-pub const AI_CANONNAME = 0x00002;
-pub const AI_NUMERICHOST = 0x00004;
-pub const AI_NUMERICSERV = 0x00008;
-pub const AI_ADDRCONFIG = 0x00400;
-pub const AI_V4MAPPED = 0x00800;
-pub const AI_NON_AUTHORITATIVE = 0x04000;
-pub const AI_SECURE = 0x08000;
+pub const AI_PASSIVE = 0x00001;
+pub const AI_CANONNAME = 0x00002;
+pub const AI_NUMERICHOST = 0x00004;
+pub const AI_NUMERICSERV = 0x00008;
+pub const AI_ADDRCONFIG = 0x00400;
+pub const AI_V4MAPPED = 0x00800;
+pub const AI_NON_AUTHORITATIVE = 0x04000;
+pub const AI_SECURE = 0x08000;
pub const AI_RETURN_PREFERRED_NAMES = 0x10000;
-pub const AI_DISABLE_IDN_ENCODING = 0x80000;
+pub const AI_DISABLE_IDN_ENCODING = 0x80000;
pub const FIONBIO = -2147195266;
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 62681968a6..a702a8aed6 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -469,7 +469,7 @@ pub const Pdb = struct {
msf: Msf,
- pub fn openFile(self: *Pdb, coff_ptr: *coff.Coff, file_name: []u8) !void {
+ pub fn openFile(self: *Pdb, coff_ptr: *coff.Coff, file_name: []const u8) !void {
self.in_file = try fs.cwd().openFile(file_name, .{ .intended_io_mode = .blocking });
self.allocator = coff_ptr.allocator;
self.coff = coff_ptr;
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index dfd2379da2..69b9e06a5b 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -333,7 +333,7 @@ test "std.PriorityQueue: addSlice" {
test "std.PriorityQueue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
- const heap_items = try std.mem.dupe(testing.allocator, u32, items[0..]);
+ const heap_items = try testing.allocator.dupe(u32, items[0..]);
var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, heap_items[0..]);
defer queue.deinit();
diff --git a/lib/std/process.zig b/lib/std/process.zig
index a65f6da3af..1a0dfad474 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -30,7 +30,7 @@ pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
var current_buf: []u8 = &stack_buf;
while (true) {
if (os.getcwd(current_buf)) |slice| {
- return mem.dupe(allocator, u8, slice);
+ return allocator.dupe(u8, slice);
} else |err| switch (err) {
error.NameTooLong => {
// The path is too long to fit in stack_buf. Allocate geometrically
@@ -169,7 +169,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwned
};
} else {
const result = os.getenv(key) orelse return error.EnvironmentVariableNotFound;
- return mem.dupe(allocator, u8, result);
+ return allocator.dupe(u8, result);
}
}
@@ -281,9 +281,6 @@ pub const ArgIteratorWasi = struct {
pub const ArgIteratorWindows = struct {
index: usize,
cmd_line: [*]const u8,
- in_quote: bool,
- quote_count: usize,
- seen_quote_count: usize,
pub const NextError = error{OutOfMemory};
@@ -295,9 +292,6 @@ pub const ArgIteratorWindows = struct {
return ArgIteratorWindows{
.index = 0,
.cmd_line = cmd_line,
- .in_quote = false,
- .quote_count = countQuotes(cmd_line),
- .seen_quote_count = 0,
};
}
@@ -328,6 +322,7 @@ pub const ArgIteratorWindows = struct {
}
var backslash_count: usize = 0;
+ var in_quote = false;
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
switch (byte) {
@@ -335,14 +330,14 @@ pub const ArgIteratorWindows = struct {
'"' => {
const quote_is_real = backslash_count % 2 == 0;
if (quote_is_real) {
- self.seen_quote_count += 1;
+ in_quote = !in_quote;
}
},
'\\' => {
backslash_count += 1;
},
' ', '\t' => {
- if (self.seen_quote_count % 2 == 0 or self.seen_quote_count == self.quote_count) {
+ if (!in_quote) {
return true;
}
backslash_count = 0;
@@ -360,6 +355,7 @@ pub const ArgIteratorWindows = struct {
defer buf.deinit();
var backslash_count: usize = 0;
+ var in_quote = false;
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
switch (byte) {
@@ -370,10 +366,7 @@ pub const ArgIteratorWindows = struct {
backslash_count = 0;
if (quote_is_real) {
- self.seen_quote_count += 1;
- if (self.seen_quote_count == self.quote_count and self.seen_quote_count % 2 == 1) {
- try buf.append('"');
- }
+ in_quote = !in_quote;
} else {
try buf.append('"');
}
@@ -384,7 +377,7 @@ pub const ArgIteratorWindows = struct {
' ', '\t' => {
try self.emitBackslashes(&buf, backslash_count);
backslash_count = 0;
- if (self.seen_quote_count % 2 == 1 and self.seen_quote_count != self.quote_count) {
+ if (in_quote) {
try buf.append(byte);
} else {
return buf.toOwnedSlice();
@@ -405,26 +398,6 @@ pub const ArgIteratorWindows = struct {
try buf.append('\\');
}
}
-
- fn countQuotes(cmd_line: [*]const u8) usize {
- var result: usize = 0;
- var backslash_count: usize = 0;
- var index: usize = 0;
- while (true) : (index += 1) {
- const byte = cmd_line[index];
- switch (byte) {
- 0 => return result,
- '\\' => backslash_count += 1,
- '"' => {
- result += 1 - (backslash_count % 2);
- backslash_count = 0;
- },
- else => {
- backslash_count = 0;
- },
- }
- }
- }
};
pub const ArgIterator = struct {
@@ -463,7 +436,7 @@ pub const ArgIterator = struct {
if (builtin.os.tag == .windows) {
return self.inner.next(allocator);
} else {
- return mem.dupe(allocator, u8, self.inner.next() orelse return null);
+ return allocator.dupe(u8, self.inner.next() orelse return null);
}
}
@@ -578,7 +551,7 @@ test "windows arg parsing" {
testWindowsCmdLine("a\\\\\\b d\"e f\"g h", &[_][]const u8{ "a\\\\\\b", "de fg", "h" });
testWindowsCmdLine("a\\\\\\\"b c d", &[_][]const u8{ "a\\\"b", "c", "d" });
testWindowsCmdLine("a\\\\\\\\\"b c\" d e", &[_][]const u8{ "a\\\\b c", "d", "e" });
- testWindowsCmdLine("a b\tc \"d f", &[_][]const u8{ "a", "b", "c", "\"d", "f" });
+ testWindowsCmdLine("a b\tc \"d f", &[_][]const u8{ "a", "b", "c", "d f" });
testWindowsCmdLine("\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", &[_][]const u8{
".\\..\\zig-cache\\build",
@@ -745,7 +718,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
const name = info.dlpi_name orelse return;
if (name[0] == '/') {
- const item = try mem.dupeZ(list.allocator, u8, mem.spanZ(name));
+ const item = try list.allocator.dupeZ(u8, mem.spanZ(name));
errdefer list.allocator.free(item);
try list.append(item);
}
@@ -766,7 +739,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
var i: u32 = 0;
while (i < img_count) : (i += 1) {
const name = std.c._dyld_get_image_name(i);
- const item = try mem.dupeZ(allocator, u8, mem.spanZ(name));
+ const item = try allocator.dupeZ(u8, mem.spanZ(name));
errdefer allocator.free(item);
try paths.append(item);
}
diff --git a/lib/std/progress.zig b/lib/std/progress.zig
index d80f8c4423..b81e81aa2c 100644
--- a/lib/std/progress.zig
+++ b/lib/std/progress.zig
@@ -224,7 +224,7 @@ pub const Progress = struct {
self.prev_refresh_timestamp = self.timer.read();
}
- pub fn log(self: *Progress, comptime format: []const u8, args: var) void {
+ pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void {
const file = self.terminal orelse return;
self.refresh();
file.outStream().print(format, args) catch {
@@ -234,7 +234,7 @@ pub const Progress = struct {
self.columns_written = 0;
}
- fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: var) void {
+ fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void {
if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| {
const amt = written.len;
end.* += amt;
diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig
index d087e480f6..d39fe3e239 100644
--- a/lib/std/segmented_list.zig
+++ b/lib/std/segmented_list.zig
@@ -122,7 +122,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.* = undefined;
}
- pub fn at(self: var, i: usize) AtType(@TypeOf(self)) {
+ pub fn at(self: anytype, i: usize) AtType(@TypeOf(self)) {
assert(i < self.len);
return self.uncheckedAt(i);
}
@@ -241,7 +241,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
}
- pub fn uncheckedAt(self: var, index: usize) AtType(@TypeOf(self)) {
+ pub fn uncheckedAt(self: anytype, index: usize) AtType(@TypeOf(self)) {
if (index < prealloc_item_count) {
return &self.prealloc_segment[index];
}
diff --git a/lib/std/sort.zig b/lib/std/sort.zig
index cb6162e9b0..464054e4a5 100644
--- a/lib/std/sort.zig
+++ b/lib/std/sort.zig
@@ -9,7 +9,7 @@ pub fn binarySearch(
comptime T: type,
key: T,
items: []const T,
- context: var,
+ context: anytype,
comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order,
) ?usize {
var left: usize = 0;
@@ -76,7 +76,7 @@ test "binarySearch" {
pub fn insertionSort(
comptime T: type,
items: []T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
var i: usize = 1;
@@ -182,7 +182,7 @@ const Pull = struct {
pub fn sort(
comptime T: type,
items: []T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
@@ -813,7 +813,7 @@ fn mergeInPlace(
items: []T,
A_arg: Range,
B_arg: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
@@ -862,7 +862,7 @@ fn mergeInternal(
items: []T,
A: Range,
B: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
buffer: Range,
) void {
@@ -906,7 +906,7 @@ fn findFirstForward(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
unique: usize,
) usize {
@@ -928,7 +928,7 @@ fn findFirstBackward(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
unique: usize,
) usize {
@@ -950,7 +950,7 @@ fn findLastForward(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
unique: usize,
) usize {
@@ -972,7 +972,7 @@ fn findLastBackward(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
unique: usize,
) usize {
@@ -994,7 +994,7 @@ fn binaryFirst(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
var curr = range.start;
@@ -1017,7 +1017,7 @@ fn binaryLast(
items: []T,
value: T,
range: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
var curr = range.start;
@@ -1040,7 +1040,7 @@ fn mergeInto(
from: []T,
A: Range,
B: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
into: []T,
) void {
@@ -1078,7 +1078,7 @@ fn mergeExternal(
items: []T,
A: Range,
B: Range,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), T, T) bool,
cache: []T,
) void {
@@ -1112,7 +1112,7 @@ fn mergeExternal(
fn swap(
comptime T: type,
items: []T,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
order: *[8]u8,
x: usize,
@@ -1358,7 +1358,7 @@ fn fuzzTest(rng: *std.rand.Random) !void {
pub fn argMin(
comptime T: type,
items: []const T,
- context: var,
+ context: anytype,
comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
if (items.len == 0) {
@@ -1390,7 +1390,7 @@ test "argMin" {
pub fn min(
comptime T: type,
items: []const T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
const i = argMin(T, items, context, lessThan) orelse return null;
@@ -1410,7 +1410,7 @@ test "min" {
pub fn argMax(
comptime T: type,
items: []const T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
if (items.len == 0) {
@@ -1442,7 +1442,7 @@ test "argMax" {
pub fn max(
comptime T: type,
items: []const T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
const i = argMax(T, items, context, lessThan) orelse return null;
@@ -1462,7 +1462,7 @@ test "max" {
pub fn isSorted(
comptime T: type,
items: []const T,
- context: var,
+ context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) bool {
var i: usize = 1;
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 1c88f98e6e..83c181e841 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -135,7 +135,7 @@ fn runBuild(builder: *Builder) anyerror!void {
}
}
-fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
+fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
@@ -202,7 +202,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
);
}
-fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) void {
+fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void {
usage(builder, already_ran_build, out_stream) catch {};
process.exit(1);
}
diff --git a/lib/std/special/compiler_rt/clzsi2_test.zig b/lib/std/special/compiler_rt/clzsi2_test.zig
index ff94455846..8a2896fcb6 100644
--- a/lib/std/special/compiler_rt/clzsi2_test.zig
+++ b/lib/std/special/compiler_rt/clzsi2_test.zig
@@ -4,7 +4,7 @@ const testing = @import("std").testing;
fn test__clzsi2(a: u32, expected: i32) void {
var nakedClzsi2 = clzsi2.__clzsi2;
var actualClzsi2 = @ptrCast(fn (a: i32) callconv(.C) i32, nakedClzsi2);
- var x = @intCast(i32, a);
+ var x = @bitCast(i32, a);
var result = actualClzsi2(x);
testing.expectEqual(expected, result);
}
diff --git a/lib/std/special/compiler_rt/int.zig b/lib/std/special/compiler_rt/int.zig
index eb731ee898..a72c13e233 100644
--- a/lib/std/special/compiler_rt/int.zig
+++ b/lib/std/special/compiler_rt/int.zig
@@ -244,7 +244,7 @@ pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
// r.all -= d.all;
// carry = 1;
// }
- const s = @intCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1);
+ const s = @bitCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1);
carry = @intCast(u32, s & 1);
r -= d & @bitCast(u32, s);
}
diff --git a/lib/std/special/compiler_rt/udivmod.zig b/lib/std/special/compiler_rt/udivmod.zig
index 7d519c34cb..ba53d1dee0 100644
--- a/lib/std/special/compiler_rt/udivmod.zig
+++ b/lib/std/special/compiler_rt/udivmod.zig
@@ -184,7 +184,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
- const s: SignedDoubleInt = @intCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
+ const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 7403cca9c2..301457dde0 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -21,6 +21,7 @@ pub fn main() anyerror!void {
for (test_fn_list) |test_fn, i| {
std.testing.base_allocator_instance.reset();
+ std.testing.log_level = .warn;
var test_node = root_node.start(test_fn.name, null);
test_node.activate();
@@ -35,7 +36,7 @@ pub fn main() anyerror!void {
async_frame_buffer = try std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size);
}
const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func);
- break :blk await @asyncCall(async_frame_buffer, {}, casted_fn);
+ break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
},
.blocking => {
skip_count += 1;
@@ -73,3 +74,14 @@ pub fn main() anyerror!void {
std.debug.warn("{} passed; {} skipped.\n", .{ ok_count, skip_count });
}
}
+
+pub fn log(
+ comptime message_level: std.log.Level,
+ comptime scope: @Type(.EnumLiteral),
+ comptime format: []const u8,
+ args: anytype,
+) void {
+ if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) {
+ std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args);
+ }
+}
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 9b1a330391..a921b8c7b2 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -246,7 +246,7 @@ inline fn initEventLoopAndCallMain(comptime Out: type, comptime mainFunc: fn ()
var result: u8 = undefined;
var frame: @Frame(callMainAsync) = undefined;
- _ = @asyncCall(&frame, &result, callMainAsync, u8, mainFunc, loop);
+ _ = @asyncCall(&frame, &result, callMainAsync, .{u8, mainFunc, loop});
loop.run();
return result;
}
diff --git a/lib/std/std.zig b/lib/std/std.zig
index b1cab77109..50aaef6f11 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -3,14 +3,16 @@ pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned;
pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged;
pub const ArrayListSentineled = @import("array_list_sentineled.zig").ArrayListSentineled;
pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged;
-pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
+pub const AutoHashMap = hash_map.AutoHashMap;
+pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BloomFilter = @import("bloom_filter.zig").BloomFilter;
pub const BufMap = @import("buf_map.zig").BufMap;
pub const BufSet = @import("buf_set.zig").BufSet;
pub const ChildProcess = @import("child_process.zig").ChildProcess;
pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringMap;
pub const DynLib = @import("dynamic_library.zig").DynLib;
-pub const HashMap = @import("hash_map.zig").HashMap;
+pub const HashMap = hash_map.HashMap;
+pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const Mutex = @import("mutex.zig").Mutex;
pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray;
pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
@@ -22,7 +24,8 @@ pub const ResetEvent = @import("reset_event.zig").ResetEvent;
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
pub const SpinLock = @import("spinlock.zig").SpinLock;
-pub const StringHashMap = @import("hash_map.zig").StringHashMap;
+pub const StringHashMap = hash_map.StringHashMap;
+pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const TailQueue = @import("linked_list.zig").TailQueue;
pub const Target = @import("target.zig").Target;
pub const Thread = @import("thread.zig").Thread;
@@ -49,6 +52,7 @@ pub const heap = @import("heap.zig");
pub const http = @import("http.zig");
pub const io = @import("io.zig");
pub const json = @import("json.zig");
+pub const log = @import("log.zig");
pub const macho = @import("macho.zig");
pub const math = @import("math.zig");
pub const mem = @import("mem.zig");
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 9df3e21e52..e1aab72786 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -101,6 +101,31 @@ pub const Target = struct {
return @enumToInt(ver) >= @enumToInt(self.min) and @enumToInt(ver) <= @enumToInt(self.max);
}
};
+
+ /// This function is defined to serialize a Zig source code representation of this
+ /// type, that, when parsed, will deserialize into the same data.
+ pub fn format(
+ self: WindowsVersion,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ out_stream: anytype,
+ ) !void {
+ if (fmt.len > 0 and fmt[0] == 's') {
+ if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) {
+ try std.fmt.format(out_stream, ".{}", .{@tagName(self)});
+ } else {
+ try std.fmt.format(out_stream, "@intToEnum(Target.Os.WindowsVersion, {})", .{@enumToInt(self)});
+ }
+ } else {
+ if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) {
+ try std.fmt.format(out_stream, "WindowsVersion.{}", .{@tagName(self)});
+ } else {
+ try std.fmt.format(out_stream, "WindowsVersion(", .{@typeName(@This())});
+ try std.fmt.format(out_stream, "{}", .{@enumToInt(self)});
+ try out_stream.writeAll(")");
+ }
+ }
+ }
};
pub const LinuxVersionRange = struct {
@@ -410,6 +435,7 @@ pub const Target = struct {
elf,
macho,
wasm,
+ c,
};
pub const SubSystem = enum {
@@ -871,25 +897,34 @@ pub const Target = struct {
/// All processors Zig is aware of, sorted lexicographically by name.
pub fn allCpuModels(arch: Arch) []const *const Cpu.Model {
return switch (arch) {
- .arm, .armeb, .thumb, .thumbeb => arm.all_cpus,
- .aarch64, .aarch64_be, .aarch64_32 => aarch64.all_cpus,
- .avr => avr.all_cpus,
- .bpfel, .bpfeb => bpf.all_cpus,
- .hexagon => hexagon.all_cpus,
- .mips, .mipsel, .mips64, .mips64el => mips.all_cpus,
- .msp430 => msp430.all_cpus,
- .powerpc, .powerpc64, .powerpc64le => powerpc.all_cpus,
- .amdgcn => amdgpu.all_cpus,
- .riscv32, .riscv64 => riscv.all_cpus,
- .sparc, .sparcv9, .sparcel => sparc.all_cpus,
- .s390x => systemz.all_cpus,
- .i386, .x86_64 => x86.all_cpus,
- .nvptx, .nvptx64 => nvptx.all_cpus,
- .wasm32, .wasm64 => wasm.all_cpus,
+ .arm, .armeb, .thumb, .thumbeb => comptime allCpusFromDecls(arm.cpu),
+ .aarch64, .aarch64_be, .aarch64_32 => comptime allCpusFromDecls(aarch64.cpu),
+ .avr => comptime allCpusFromDecls(avr.cpu),
+ .bpfel, .bpfeb => comptime allCpusFromDecls(bpf.cpu),
+ .hexagon => comptime allCpusFromDecls(hexagon.cpu),
+ .mips, .mipsel, .mips64, .mips64el => comptime allCpusFromDecls(mips.cpu),
+ .msp430 => comptime allCpusFromDecls(msp430.cpu),
+ .powerpc, .powerpc64, .powerpc64le => comptime allCpusFromDecls(powerpc.cpu),
+ .amdgcn => comptime allCpusFromDecls(amdgpu.cpu),
+ .riscv32, .riscv64 => comptime allCpusFromDecls(riscv.cpu),
+ .sparc, .sparcv9, .sparcel => comptime allCpusFromDecls(sparc.cpu),
+ .s390x => comptime allCpusFromDecls(systemz.cpu),
+ .i386, .x86_64 => comptime allCpusFromDecls(x86.cpu),
+ .nvptx, .nvptx64 => comptime allCpusFromDecls(nvptx.cpu),
+ .wasm32, .wasm64 => comptime allCpusFromDecls(wasm.cpu),
else => &[0]*const Model{},
};
}
+
+ fn allCpusFromDecls(comptime cpus: type) []const *const Cpu.Model {
+ const decls = std.meta.declarations(cpus);
+ var array: [decls.len]*const Cpu.Model = undefined;
+ for (decls) |decl, i| {
+ array[i] = &@field(cpus, decl.name);
+ }
+ return &array;
+ }
};
pub const Model = struct {
@@ -1157,7 +1192,7 @@ pub const Target = struct {
pub fn standardDynamicLinkerPath(self: Target) DynamicLinker {
var result: DynamicLinker = .{};
const S = struct {
- fn print(r: *DynamicLinker, comptime fmt: []const u8, args: var) DynamicLinker {
+ fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker {
r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1);
return r.*;
}
diff --git a/lib/std/target/aarch64.zig b/lib/std/target/aarch64.zig
index 5c49d4acfc..9af95dfada 100644
--- a/lib/std/target/aarch64.zig
+++ b/lib/std/target/aarch64.zig
@@ -1505,48 +1505,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All aarch64 CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.apple_a10,
- &cpu.apple_a11,
- &cpu.apple_a12,
- &cpu.apple_a13,
- &cpu.apple_a7,
- &cpu.apple_a8,
- &cpu.apple_a9,
- &cpu.apple_latest,
- &cpu.apple_s4,
- &cpu.apple_s5,
- &cpu.cortex_a35,
- &cpu.cortex_a53,
- &cpu.cortex_a55,
- &cpu.cortex_a57,
- &cpu.cortex_a65,
- &cpu.cortex_a65ae,
- &cpu.cortex_a72,
- &cpu.cortex_a73,
- &cpu.cortex_a75,
- &cpu.cortex_a76,
- &cpu.cortex_a76ae,
- &cpu.cyclone,
- &cpu.exynos_m1,
- &cpu.exynos_m2,
- &cpu.exynos_m3,
- &cpu.exynos_m4,
- &cpu.exynos_m5,
- &cpu.falkor,
- &cpu.generic,
- &cpu.kryo,
- &cpu.neoverse_e1,
- &cpu.neoverse_n1,
- &cpu.saphira,
- &cpu.thunderx,
- &cpu.thunderx2t99,
- &cpu.thunderxt81,
- &cpu.thunderxt83,
- &cpu.thunderxt88,
- &cpu.tsv110,
-};
diff --git a/lib/std/target/amdgpu.zig b/lib/std/target/amdgpu.zig
index 962e3073cf..4b3f83bbc3 100644
--- a/lib/std/target/amdgpu.zig
+++ b/lib/std/target/amdgpu.zig
@@ -1276,48 +1276,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All amdgpu CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.bonaire,
- &cpu.carrizo,
- &cpu.fiji,
- &cpu.generic,
- &cpu.generic_hsa,
- &cpu.gfx1010,
- &cpu.gfx1011,
- &cpu.gfx1012,
- &cpu.gfx600,
- &cpu.gfx601,
- &cpu.gfx700,
- &cpu.gfx701,
- &cpu.gfx702,
- &cpu.gfx703,
- &cpu.gfx704,
- &cpu.gfx801,
- &cpu.gfx802,
- &cpu.gfx803,
- &cpu.gfx810,
- &cpu.gfx900,
- &cpu.gfx902,
- &cpu.gfx904,
- &cpu.gfx906,
- &cpu.gfx908,
- &cpu.gfx909,
- &cpu.hainan,
- &cpu.hawaii,
- &cpu.iceland,
- &cpu.kabini,
- &cpu.kaveri,
- &cpu.mullins,
- &cpu.oland,
- &cpu.pitcairn,
- &cpu.polaris10,
- &cpu.polaris11,
- &cpu.stoney,
- &cpu.tahiti,
- &cpu.tonga,
- &cpu.verde,
-};
diff --git a/lib/std/target/arm.zig b/lib/std/target/arm.zig
index aab8e9d068..90b060c03f 100644
--- a/lib/std/target/arm.zig
+++ b/lib/std/target/arm.zig
@@ -2145,92 +2145,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All arm CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.arm1020e,
- &cpu.arm1020t,
- &cpu.arm1022e,
- &cpu.arm10e,
- &cpu.arm10tdmi,
- &cpu.arm1136j_s,
- &cpu.arm1136jf_s,
- &cpu.arm1156t2_s,
- &cpu.arm1156t2f_s,
- &cpu.arm1176j_s,
- &cpu.arm1176jz_s,
- &cpu.arm1176jzf_s,
- &cpu.arm710t,
- &cpu.arm720t,
- &cpu.arm7tdmi,
- &cpu.arm7tdmi_s,
- &cpu.arm8,
- &cpu.arm810,
- &cpu.arm9,
- &cpu.arm920,
- &cpu.arm920t,
- &cpu.arm922t,
- &cpu.arm926ej_s,
- &cpu.arm940t,
- &cpu.arm946e_s,
- &cpu.arm966e_s,
- &cpu.arm968e_s,
- &cpu.arm9e,
- &cpu.arm9tdmi,
- &cpu.cortex_a12,
- &cpu.cortex_a15,
- &cpu.cortex_a17,
- &cpu.cortex_a32,
- &cpu.cortex_a35,
- &cpu.cortex_a5,
- &cpu.cortex_a53,
- &cpu.cortex_a55,
- &cpu.cortex_a57,
- &cpu.cortex_a7,
- &cpu.cortex_a72,
- &cpu.cortex_a73,
- &cpu.cortex_a75,
- &cpu.cortex_a76,
- &cpu.cortex_a76ae,
- &cpu.cortex_a8,
- &cpu.cortex_a9,
- &cpu.cortex_m0,
- &cpu.cortex_m0plus,
- &cpu.cortex_m1,
- &cpu.cortex_m23,
- &cpu.cortex_m3,
- &cpu.cortex_m33,
- &cpu.cortex_m35p,
- &cpu.cortex_m4,
- &cpu.cortex_m7,
- &cpu.cortex_r4,
- &cpu.cortex_r4f,
- &cpu.cortex_r5,
- &cpu.cortex_r52,
- &cpu.cortex_r7,
- &cpu.cortex_r8,
- &cpu.cyclone,
- &cpu.ep9312,
- &cpu.exynos_m1,
- &cpu.exynos_m2,
- &cpu.exynos_m3,
- &cpu.exynos_m4,
- &cpu.exynos_m5,
- &cpu.generic,
- &cpu.iwmmxt,
- &cpu.krait,
- &cpu.kryo,
- &cpu.mpcore,
- &cpu.mpcorenovfp,
- &cpu.neoverse_n1,
- &cpu.sc000,
- &cpu.sc300,
- &cpu.strongarm,
- &cpu.strongarm110,
- &cpu.strongarm1100,
- &cpu.strongarm1110,
- &cpu.swift,
- &cpu.xscale,
-};
diff --git a/lib/std/target/avr.zig b/lib/std/target/avr.zig
index 4d0da9b2c3..af4f5ba5be 100644
--- a/lib/std/target/avr.zig
+++ b/lib/std/target/avr.zig
@@ -2116,266 +2116,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All avr CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.at43usb320,
- &cpu.at43usb355,
- &cpu.at76c711,
- &cpu.at86rf401,
- &cpu.at90c8534,
- &cpu.at90can128,
- &cpu.at90can32,
- &cpu.at90can64,
- &cpu.at90pwm1,
- &cpu.at90pwm161,
- &cpu.at90pwm2,
- &cpu.at90pwm216,
- &cpu.at90pwm2b,
- &cpu.at90pwm3,
- &cpu.at90pwm316,
- &cpu.at90pwm3b,
- &cpu.at90pwm81,
- &cpu.at90s1200,
- &cpu.at90s2313,
- &cpu.at90s2323,
- &cpu.at90s2333,
- &cpu.at90s2343,
- &cpu.at90s4414,
- &cpu.at90s4433,
- &cpu.at90s4434,
- &cpu.at90s8515,
- &cpu.at90s8535,
- &cpu.at90scr100,
- &cpu.at90usb1286,
- &cpu.at90usb1287,
- &cpu.at90usb162,
- &cpu.at90usb646,
- &cpu.at90usb647,
- &cpu.at90usb82,
- &cpu.at94k,
- &cpu.ata5272,
- &cpu.ata5505,
- &cpu.ata5790,
- &cpu.ata5795,
- &cpu.ata6285,
- &cpu.ata6286,
- &cpu.ata6289,
- &cpu.atmega103,
- &cpu.atmega128,
- &cpu.atmega1280,
- &cpu.atmega1281,
- &cpu.atmega1284,
- &cpu.atmega1284p,
- &cpu.atmega1284rfr2,
- &cpu.atmega128a,
- &cpu.atmega128rfa1,
- &cpu.atmega128rfr2,
- &cpu.atmega16,
- &cpu.atmega161,
- &cpu.atmega162,
- &cpu.atmega163,
- &cpu.atmega164a,
- &cpu.atmega164p,
- &cpu.atmega164pa,
- &cpu.atmega165,
- &cpu.atmega165a,
- &cpu.atmega165p,
- &cpu.atmega165pa,
- &cpu.atmega168,
- &cpu.atmega168a,
- &cpu.atmega168p,
- &cpu.atmega168pa,
- &cpu.atmega169,
- &cpu.atmega169a,
- &cpu.atmega169p,
- &cpu.atmega169pa,
- &cpu.atmega16a,
- &cpu.atmega16hva,
- &cpu.atmega16hva2,
- &cpu.atmega16hvb,
- &cpu.atmega16hvbrevb,
- &cpu.atmega16m1,
- &cpu.atmega16u2,
- &cpu.atmega16u4,
- &cpu.atmega2560,
- &cpu.atmega2561,
- &cpu.atmega2564rfr2,
- &cpu.atmega256rfr2,
- &cpu.atmega32,
- &cpu.atmega323,
- &cpu.atmega324a,
- &cpu.atmega324p,
- &cpu.atmega324pa,
- &cpu.atmega325,
- &cpu.atmega3250,
- &cpu.atmega3250a,
- &cpu.atmega3250p,
- &cpu.atmega3250pa,
- &cpu.atmega325a,
- &cpu.atmega325p,
- &cpu.atmega325pa,
- &cpu.atmega328,
- &cpu.atmega328p,
- &cpu.atmega329,
- &cpu.atmega3290,
- &cpu.atmega3290a,
- &cpu.atmega3290p,
- &cpu.atmega3290pa,
- &cpu.atmega329a,
- &cpu.atmega329p,
- &cpu.atmega329pa,
- &cpu.atmega32a,
- &cpu.atmega32c1,
- &cpu.atmega32hvb,
- &cpu.atmega32hvbrevb,
- &cpu.atmega32m1,
- &cpu.atmega32u2,
- &cpu.atmega32u4,
- &cpu.atmega32u6,
- &cpu.atmega406,
- &cpu.atmega48,
- &cpu.atmega48a,
- &cpu.atmega48p,
- &cpu.atmega48pa,
- &cpu.atmega64,
- &cpu.atmega640,
- &cpu.atmega644,
- &cpu.atmega644a,
- &cpu.atmega644p,
- &cpu.atmega644pa,
- &cpu.atmega644rfr2,
- &cpu.atmega645,
- &cpu.atmega6450,
- &cpu.atmega6450a,
- &cpu.atmega6450p,
- &cpu.atmega645a,
- &cpu.atmega645p,
- &cpu.atmega649,
- &cpu.atmega6490,
- &cpu.atmega6490a,
- &cpu.atmega6490p,
- &cpu.atmega649a,
- &cpu.atmega649p,
- &cpu.atmega64a,
- &cpu.atmega64c1,
- &cpu.atmega64hve,
- &cpu.atmega64m1,
- &cpu.atmega64rfr2,
- &cpu.atmega8,
- &cpu.atmega8515,
- &cpu.atmega8535,
- &cpu.atmega88,
- &cpu.atmega88a,
- &cpu.atmega88p,
- &cpu.atmega88pa,
- &cpu.atmega8a,
- &cpu.atmega8hva,
- &cpu.atmega8u2,
- &cpu.attiny10,
- &cpu.attiny102,
- &cpu.attiny104,
- &cpu.attiny11,
- &cpu.attiny12,
- &cpu.attiny13,
- &cpu.attiny13a,
- &cpu.attiny15,
- &cpu.attiny1634,
- &cpu.attiny167,
- &cpu.attiny20,
- &cpu.attiny22,
- &cpu.attiny2313,
- &cpu.attiny2313a,
- &cpu.attiny24,
- &cpu.attiny24a,
- &cpu.attiny25,
- &cpu.attiny26,
- &cpu.attiny261,
- &cpu.attiny261a,
- &cpu.attiny28,
- &cpu.attiny4,
- &cpu.attiny40,
- &cpu.attiny4313,
- &cpu.attiny43u,
- &cpu.attiny44,
- &cpu.attiny44a,
- &cpu.attiny45,
- &cpu.attiny461,
- &cpu.attiny461a,
- &cpu.attiny48,
- &cpu.attiny5,
- &cpu.attiny828,
- &cpu.attiny84,
- &cpu.attiny84a,
- &cpu.attiny85,
- &cpu.attiny861,
- &cpu.attiny861a,
- &cpu.attiny87,
- &cpu.attiny88,
- &cpu.attiny9,
- &cpu.atxmega128a1,
- &cpu.atxmega128a1u,
- &cpu.atxmega128a3,
- &cpu.atxmega128a3u,
- &cpu.atxmega128a4u,
- &cpu.atxmega128b1,
- &cpu.atxmega128b3,
- &cpu.atxmega128c3,
- &cpu.atxmega128d3,
- &cpu.atxmega128d4,
- &cpu.atxmega16a4,
- &cpu.atxmega16a4u,
- &cpu.atxmega16c4,
- &cpu.atxmega16d4,
- &cpu.atxmega16e5,
- &cpu.atxmega192a3,
- &cpu.atxmega192a3u,
- &cpu.atxmega192c3,
- &cpu.atxmega192d3,
- &cpu.atxmega256a3,
- &cpu.atxmega256a3b,
- &cpu.atxmega256a3bu,
- &cpu.atxmega256a3u,
- &cpu.atxmega256c3,
- &cpu.atxmega256d3,
- &cpu.atxmega32a4,
- &cpu.atxmega32a4u,
- &cpu.atxmega32c4,
- &cpu.atxmega32d4,
- &cpu.atxmega32e5,
- &cpu.atxmega32x1,
- &cpu.atxmega384c3,
- &cpu.atxmega384d3,
- &cpu.atxmega64a1,
- &cpu.atxmega64a1u,
- &cpu.atxmega64a3,
- &cpu.atxmega64a3u,
- &cpu.atxmega64a4u,
- &cpu.atxmega64b1,
- &cpu.atxmega64b3,
- &cpu.atxmega64c3,
- &cpu.atxmega64d3,
- &cpu.atxmega64d4,
- &cpu.atxmega8e5,
- &cpu.avr1,
- &cpu.avr2,
- &cpu.avr25,
- &cpu.avr3,
- &cpu.avr31,
- &cpu.avr35,
- &cpu.avr4,
- &cpu.avr5,
- &cpu.avr51,
- &cpu.avr6,
- &cpu.avrtiny,
- &cpu.avrxmega1,
- &cpu.avrxmega2,
- &cpu.avrxmega3,
- &cpu.avrxmega4,
- &cpu.avrxmega5,
- &cpu.avrxmega6,
- &cpu.avrxmega7,
- &cpu.m3000,
-};
diff --git a/lib/std/target/bpf.zig b/lib/std/target/bpf.zig
index 6b548ac031..ddb12d3d85 100644
--- a/lib/std/target/bpf.zig
+++ b/lib/std/target/bpf.zig
@@ -64,14 +64,3 @@ pub const cpu = struct {
.features = featureSet(&[_]Feature{}),
};
};
-
-/// All bpf CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.generic,
- &cpu.probe,
- &cpu.v1,
- &cpu.v2,
- &cpu.v3,
-};
diff --git a/lib/std/target/hexagon.zig b/lib/std/target/hexagon.zig
index b0558908e3..f429099d88 100644
--- a/lib/std/target/hexagon.zig
+++ b/lib/std/target/hexagon.zig
@@ -298,16 +298,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All hexagon CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.generic,
- &cpu.hexagonv5,
- &cpu.hexagonv55,
- &cpu.hexagonv60,
- &cpu.hexagonv62,
- &cpu.hexagonv65,
- &cpu.hexagonv66,
-};
diff --git a/lib/std/target/mips.zig b/lib/std/target/mips.zig
index 21211ae20e..fc95b2dee8 100644
--- a/lib/std/target/mips.zig
+++ b/lib/std/target/mips.zig
@@ -524,28 +524,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All mips CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.generic,
- &cpu.mips1,
- &cpu.mips2,
- &cpu.mips3,
- &cpu.mips32,
- &cpu.mips32r2,
- &cpu.mips32r3,
- &cpu.mips32r5,
- &cpu.mips32r6,
- &cpu.mips4,
- &cpu.mips5,
- &cpu.mips64,
- &cpu.mips64r2,
- &cpu.mips64r3,
- &cpu.mips64r5,
- &cpu.mips64r6,
- &cpu.octeon,
- &cpu.@"octeon+",
- &cpu.p5600,
-};
diff --git a/lib/std/target/msp430.zig b/lib/std/target/msp430.zig
index e1b858341f..947137b3e2 100644
--- a/lib/std/target/msp430.zig
+++ b/lib/std/target/msp430.zig
@@ -62,12 +62,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All msp430 CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.generic,
- &cpu.msp430,
- &cpu.msp430x,
-};
diff --git a/lib/std/target/nvptx.zig b/lib/std/target/nvptx.zig
index 6a79aea1da..d719a6bb71 100644
--- a/lib/std/target/nvptx.zig
+++ b/lib/std/target/nvptx.zig
@@ -287,24 +287,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All nvptx CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.sm_20,
- &cpu.sm_21,
- &cpu.sm_30,
- &cpu.sm_32,
- &cpu.sm_35,
- &cpu.sm_37,
- &cpu.sm_50,
- &cpu.sm_52,
- &cpu.sm_53,
- &cpu.sm_60,
- &cpu.sm_61,
- &cpu.sm_62,
- &cpu.sm_70,
- &cpu.sm_72,
- &cpu.sm_75,
-};
diff --git a/lib/std/target/powerpc.zig b/lib/std/target/powerpc.zig
index c06b82f02a..ffea7344fc 100644
--- a/lib/std/target/powerpc.zig
+++ b/lib/std/target/powerpc.zig
@@ -944,47 +944,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All powerpc CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.@"440",
- &cpu.@"450",
- &cpu.@"601",
- &cpu.@"602",
- &cpu.@"603",
- &cpu.@"603e",
- &cpu.@"603ev",
- &cpu.@"604",
- &cpu.@"604e",
- &cpu.@"620",
- &cpu.@"7400",
- &cpu.@"7450",
- &cpu.@"750",
- &cpu.@"970",
- &cpu.a2,
- &cpu.a2q,
- &cpu.e500,
- &cpu.e500mc,
- &cpu.e5500,
- &cpu.future,
- &cpu.g3,
- &cpu.g4,
- &cpu.@"g4+",
- &cpu.g5,
- &cpu.generic,
- &cpu.ppc,
- &cpu.ppc32,
- &cpu.ppc64,
- &cpu.ppc64le,
- &cpu.pwr3,
- &cpu.pwr4,
- &cpu.pwr5,
- &cpu.pwr5x,
- &cpu.pwr6,
- &cpu.pwr6x,
- &cpu.pwr7,
- &cpu.pwr8,
- &cpu.pwr9,
-};
diff --git a/lib/std/target/riscv.zig b/lib/std/target/riscv.zig
index ff8921eaf2..dbdb107024 100644
--- a/lib/std/target/riscv.zig
+++ b/lib/std/target/riscv.zig
@@ -303,13 +303,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All riscv CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.baseline_rv32,
- &cpu.baseline_rv64,
- &cpu.generic_rv32,
- &cpu.generic_rv64,
-};
diff --git a/lib/std/target/sparc.zig b/lib/std/target/sparc.zig
index 3ec6cc7c20..e1cbc845fc 100644
--- a/lib/std/target/sparc.zig
+++ b/lib/std/target/sparc.zig
@@ -448,49 +448,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All sparc CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.at697e,
- &cpu.at697f,
- &cpu.f934,
- &cpu.generic,
- &cpu.gr712rc,
- &cpu.gr740,
- &cpu.hypersparc,
- &cpu.leon2,
- &cpu.leon3,
- &cpu.leon4,
- &cpu.ma2080,
- &cpu.ma2085,
- &cpu.ma2100,
- &cpu.ma2150,
- &cpu.ma2155,
- &cpu.ma2450,
- &cpu.ma2455,
- &cpu.ma2480,
- &cpu.ma2485,
- &cpu.ma2x5x,
- &cpu.ma2x8x,
- &cpu.myriad2,
- &cpu.myriad2_1,
- &cpu.myriad2_2,
- &cpu.myriad2_3,
- &cpu.niagara,
- &cpu.niagara2,
- &cpu.niagara3,
- &cpu.niagara4,
- &cpu.sparclet,
- &cpu.sparclite,
- &cpu.sparclite86x,
- &cpu.supersparc,
- &cpu.tsc701,
- &cpu.ultrasparc,
- &cpu.ultrasparc3,
- &cpu.ut699,
- &cpu.v7,
- &cpu.v8,
- &cpu.v9,
-};
diff --git a/lib/std/target/systemz.zig b/lib/std/target/systemz.zig
index 453ce8887f..f065a8b169 100644
--- a/lib/std/target/systemz.zig
+++ b/lib/std/target/systemz.zig
@@ -532,22 +532,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All systemz CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.arch10,
- &cpu.arch11,
- &cpu.arch12,
- &cpu.arch13,
- &cpu.arch8,
- &cpu.arch9,
- &cpu.generic,
- &cpu.z10,
- &cpu.z13,
- &cpu.z14,
- &cpu.z15,
- &cpu.z196,
- &cpu.zEC12,
-};
diff --git a/lib/std/target/wasm.zig b/lib/std/target/wasm.zig
index 066282f3c6..72b2b6d431 100644
--- a/lib/std/target/wasm.zig
+++ b/lib/std/target/wasm.zig
@@ -104,12 +104,3 @@ pub const cpu = struct {
.features = featureSet(&[_]Feature{}),
};
};
-
-/// All wasm CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.bleeding_edge,
- &cpu.generic,
- &cpu.mvp,
-};
diff --git a/lib/std/target/x86.zig b/lib/std/target/x86.zig
index 5eccd61c7e..bfcd1abc1a 100644
--- a/lib/std/target/x86.zig
+++ b/lib/std/target/x86.zig
@@ -2943,88 +2943,3 @@ pub const cpu = struct {
}),
};
};
-
-/// All x86 CPUs, sorted alphabetically by name.
-/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
-/// compiler has inefficient memory and CPU usage, affecting build times.
-pub const all_cpus = &[_]*const CpuModel{
- &cpu.amdfam10,
- &cpu.athlon,
- &cpu.athlon_4,
- &cpu.athlon_fx,
- &cpu.athlon_mp,
- &cpu.athlon_tbird,
- &cpu.athlon_xp,
- &cpu.athlon64,
- &cpu.athlon64_sse3,
- &cpu.atom,
- &cpu.barcelona,
- &cpu.bdver1,
- &cpu.bdver2,
- &cpu.bdver3,
- &cpu.bdver4,
- &cpu.bonnell,
- &cpu.broadwell,
- &cpu.btver1,
- &cpu.btver2,
- &cpu.c3,
- &cpu.c3_2,
- &cpu.cannonlake,
- &cpu.cascadelake,
- &cpu.cooperlake,
- &cpu.core_avx_i,
- &cpu.core_avx2,
- &cpu.core2,
- &cpu.corei7,
- &cpu.corei7_avx,
- &cpu.generic,
- &cpu.geode,
- &cpu.goldmont,
- &cpu.goldmont_plus,
- &cpu.haswell,
- &cpu._i386,
- &cpu._i486,
- &cpu._i586,
- &cpu._i686,
- &cpu.icelake_client,
- &cpu.icelake_server,
- &cpu.ivybridge,
- &cpu.k6,
- &cpu.k6_2,
- &cpu.k6_3,
- &cpu.k8,
- &cpu.k8_sse3,
- &cpu.knl,
- &cpu.knm,
- &cpu.lakemont,
- &cpu.nehalem,
- &cpu.nocona,
- &cpu.opteron,
- &cpu.opteron_sse3,
- &cpu.penryn,
- &cpu.pentium,
- &cpu.pentium_m,
- &cpu.pentium_mmx,
- &cpu.pentium2,
- &cpu.pentium3,
- &cpu.pentium3m,
- &cpu.pentium4,
- &cpu.pentium4m,
- &cpu.pentiumpro,
- &cpu.prescott,
- &cpu.sandybridge,
- &cpu.silvermont,
- &cpu.skx,
- &cpu.skylake,
- &cpu.skylake_avx512,
- &cpu.slm,
- &cpu.tigerlake,
- &cpu.tremont,
- &cpu.westmere,
- &cpu.winchip_c6,
- &cpu.winchip2,
- &cpu.x86_64,
- &cpu.yonah,
- &cpu.znver1,
- &cpu.znver2,
-};
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 2d136d56c9..44c221d76a 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -11,12 +11,15 @@ pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.al
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
-pub var base_allocator_instance = std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]);
+pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
var allocator_mem: [2 * 1024 * 1024]u8 = undefined;
+/// TODO https://github.com/ziglang/zig/issues/5738
+pub var log_level = std.log.Level.warn;
+
/// This function is intended to be used only in tests. It prints diagnostics to stderr
/// and then aborts when actual_error_union is not expected_error.
-pub fn expectError(expected_error: anyerror, actual_error_union: var) void {
+pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void {
if (actual_error_union) |actual_payload| {
std.debug.panic("expected error.{}, found {}", .{ @errorName(expected_error), actual_payload });
} else |actual_error| {
@@ -33,7 +36,7 @@ pub fn expectError(expected_error: anyerror, actual_error_union: var) void {
/// equal, prints diagnostics to stderr to show exactly how they are not equal,
/// then aborts.
/// The types must match exactly.
-pub fn expectEqual(expected: var, actual: @TypeOf(expected)) void {
+pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void {
switch (@typeInfo(@TypeOf(actual))) {
.NoReturn,
.BoundFn,
@@ -215,7 +218,7 @@ fn getCwdOrWasiPreopen() std.fs.Dir {
defer preopens.deinit();
preopens.populate() catch
@panic("unable to make tmp dir for testing: unable to populate preopens");
- const preopen = preopens.find(".") orelse
+ const preopen = preopens.find(std.fs.wasi.PreopenType{ .Dir = "." }) orelse
@panic("unable to make tmp dir for testing: didn't find '.' in the preopens");
return std.fs.Dir{ .fd = preopen.fd };
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 081a29cd97..ade3e9d85a 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -39,43 +39,38 @@ pub const FailingAllocator = struct {
.allocations = 0,
.deallocations = 0,
.allocator = mem.Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = resize,
},
};
}
- fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.reallocFn(
- self.internal_allocator,
- old_mem,
- old_align,
- new_size,
- new_align,
- );
- if (new_size < old_mem.len) {
- self.freed_bytes += old_mem.len - new_size;
- if (new_size == 0)
- self.deallocations += 1;
- } else if (new_size > old_mem.len) {
- self.allocated_bytes += new_size - old_mem.len;
- if (old_mem.len == 0)
- self.allocations += 1;
- }
+ const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
+ self.allocated_bytes += result.len;
+ self.allocations += 1;
self.index += 1;
return result;
}
- fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
- self.freed_bytes += old_mem.len - r.len;
- if (new_size == 0)
+ const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| {
+ std.debug.assert(new_len > old_mem.len);
+ return e;
+ };
+ if (new_len == 0) {
self.deallocations += 1;
+ self.freed_bytes += old_mem.len;
+ } else if (r < old_mem.len) {
+ self.freed_bytes += old_mem.len - r;
+ } else {
+ self.allocated_bytes += r - old_mem.len;
+ }
return r;
}
};
diff --git a/lib/std/testing/leak_count_allocator.zig b/lib/std/testing/leak_count_allocator.zig
index 65244e529b..87564aeea7 100644
--- a/lib/std/testing/leak_count_allocator.zig
+++ b/lib/std/testing/leak_count_allocator.zig
@@ -14,23 +14,21 @@ pub const LeakCountAllocator = struct {
return .{
.count = 0,
.allocator = .{
- .reallocFn = realloc,
- .shrinkFn = shrink,
+ .allocFn = alloc,
+ .resizeFn = resize,
},
.internal_allocator = allocator,
};
}
- fn realloc(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
- var data = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
- if (old_mem.len == 0) {
- self.count += 1;
- }
- return data;
+ const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
+ self.count += 1;
+ return ptr;
}
- fn shrink(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
if (new_size == 0) {
if (self.count == 0) {
@@ -38,7 +36,10 @@ pub const LeakCountAllocator = struct {
}
self.count -= 1;
}
- return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
+ return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| {
+ std.debug.assert(new_size > old_mem.len);
+ return e;
+ };
}
pub fn validate(self: LeakCountAllocator) !void {
diff --git a/lib/std/thread.zig b/lib/std/thread.zig
index d07c41c5b0..3d20f54558 100644
--- a/lib/std/thread.zig
+++ b/lib/std/thread.zig
@@ -143,7 +143,7 @@ pub const Thread = struct {
/// fn startFn(@TypeOf(context)) T
/// where T is u8, noreturn, void, or !void
/// caller must call wait on the returned thread
- pub fn spawn(context: var, comptime startFn: var) SpawnError!*Thread {
+ pub fn spawn(context: anytype, comptime startFn: anytype) SpawnError!*Thread {
if (builtin.single_threaded) @compileError("cannot spawn thread when building in single-threaded mode");
// TODO compile-time call graph analysis to determine stack upper bound
// https://github.com/ziglang/zig/issues/157
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index df2e16a4bf..2c88d2ba0c 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -235,6 +235,22 @@ pub const Utf8Iterator = struct {
else => unreachable,
}
}
+
+ /// Look ahead at the next n codepoints without advancing the iterator.
+ /// If fewer than n codepoints are available, then return the remainder of the string.
+ pub fn peek(it: *Utf8Iterator, n: usize) []const u8 {
+ const original_i = it.i;
+ defer it.i = original_i;
+
+ var end_ix = original_i;
+ var found: usize = 0;
+ while (found < n) : (found += 1) {
+ const next_codepoint = it.nextCodepointSlice() orelse return it.bytes[original_i..];
+ end_ix += next_codepoint.len;
+ }
+
+ return it.bytes[original_i..end_ix];
+ }
};
pub const Utf16LeIterator = struct {
@@ -451,6 +467,31 @@ fn testMiscInvalidUtf8() void {
testValid("\xee\x80\x80", 0xe000);
}
+test "utf8 iterator peeking" {
+ comptime testUtf8Peeking();
+ testUtf8Peeking();
+}
+
+fn testUtf8Peeking() void {
+ const s = Utf8View.initComptime("noël");
+ var it = s.iterator();
+
+ testing.expect(std.mem.eql(u8, "n", it.nextCodepointSlice().?));
+
+ testing.expect(std.mem.eql(u8, "o", it.peek(1)));
+ testing.expect(std.mem.eql(u8, "oë", it.peek(2)));
+ testing.expect(std.mem.eql(u8, "oël", it.peek(3)));
+ testing.expect(std.mem.eql(u8, "oël", it.peek(4)));
+ testing.expect(std.mem.eql(u8, "oël", it.peek(10)));
+
+ testing.expect(std.mem.eql(u8, "o", it.nextCodepointSlice().?));
+ testing.expect(std.mem.eql(u8, "ë", it.nextCodepointSlice().?));
+ testing.expect(std.mem.eql(u8, "l", it.nextCodepointSlice().?));
+ testing.expect(it.nextCodepointSlice() == null);
+
+ testing.expect(std.mem.eql(u8, &[_]u8{}, it.peek(1)));
+}
+
fn testError(bytes: []const u8, expected_err: anyerror) void {
testing.expectError(expected_err, testDecode(bytes));
}
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index bb4f955797..841827cc19 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -1,4 +1,6 @@
+const std = @import("std.zig");
const tokenizer = @import("zig/tokenizer.zig");
+
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const parse = @import("zig/parse.zig").parse;
@@ -9,6 +11,21 @@ pub const ast = @import("zig/ast.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget;
+pub const SrcHash = [16]u8;
+
+/// If the source is small enough, it is used directly as the hash.
+/// If it is long, blake3 hash is computed.
+pub fn hashSrc(src: []const u8) SrcHash {
+ var out: SrcHash = undefined;
+ if (src.len <= SrcHash.len) {
+ std.mem.copy(u8, &out, src);
+ std.mem.set(u8, out[src.len..], 0);
+ } else {
+ std.crypto.Blake3.hash(src, &out);
+ }
+ return out;
+}
+
pub fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } {
var line: usize = 0;
var column: usize = 0;
@@ -26,6 +43,27 @@ pub fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usi
return .{ .line = line, .column = column };
}
+/// Returns the standard file system basename of a binary generated by the Zig compiler.
+pub fn binNameAlloc(
+ allocator: *std.mem.Allocator,
+ root_name: []const u8,
+ target: std.Target,
+ output_mode: std.builtin.OutputMode,
+ link_mode: ?std.builtin.LinkMode,
+) error{OutOfMemory}![]u8 {
+ switch (output_mode) {
+ .Exe => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.exeFileExt() }),
+ .Lib => {
+ const suffix = switch (link_mode orelse .Static) {
+ .Static => target.staticLibSuffix(),
+ .Dynamic => target.dynamicLibSuffix(),
+ };
+ return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
+ },
+ .Obj => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.oFileExt() }),
+ }
+}
+
test "" {
@import("std").meta.refAllDecls(@This());
}
diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig
index 4d63011266..b91cac7865 100644
--- a/lib/std/zig/ast.zig
+++ b/lib/std/zig/ast.zig
@@ -29,7 +29,7 @@ pub const Tree = struct {
self.arena.promote(self.gpa).deinit();
}
- pub fn renderError(self: *Tree, parse_error: *const Error, stream: var) !void {
+ pub fn renderError(self: *Tree, parse_error: *const Error, stream: anytype) !void {
return parse_error.render(self.token_ids, stream);
}
@@ -167,7 +167,7 @@ pub const Error = union(enum) {
DeclBetweenFields: DeclBetweenFields,
InvalidAnd: InvalidAnd,
- pub fn render(self: *const Error, tokens: []const Token.Id, stream: var) !void {
+ pub fn render(self: *const Error, tokens: []const Token.Id, stream: anytype) !void {
switch (self.*) {
.InvalidToken => |*x| return x.render(tokens, stream),
.ExpectedContainerMembers => |*x| return x.render(tokens, stream),
@@ -322,9 +322,9 @@ pub const Error = union(enum) {
pub const ExpectedCall = struct {
node: *Node,
- pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: var) !void {
- return stream.print("expected " ++ @tagName(Node.Id.Call) ++ ", found {}", .{
- @tagName(self.node.id),
+ pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: anytype) !void {
+ return stream.print("expected " ++ @tagName(Node.Tag.Call) ++ ", found {}", .{
+ @tagName(self.node.tag),
});
}
};
@@ -332,9 +332,9 @@ pub const Error = union(enum) {
pub const ExpectedCallOrFnProto = struct {
node: *Node,
- pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: var) !void {
- return stream.print("expected " ++ @tagName(Node.Id.Call) ++ " or " ++
- @tagName(Node.Id.FnProto) ++ ", found {}", .{@tagName(self.node.id)});
+ pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: anytype) !void {
+ return stream.print("expected " ++ @tagName(Node.Tag.Call) ++ " or " ++
+ @tagName(Node.Tag.FnProto) ++ ", found {}", .{@tagName(self.node.tag)});
}
};
@@ -342,7 +342,7 @@ pub const Error = union(enum) {
token: TokenIndex,
expected_id: Token.Id,
- pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: anytype) !void {
const found_token = tokens[self.token];
switch (found_token) {
.Invalid => {
@@ -360,7 +360,7 @@ pub const Error = union(enum) {
token: TokenIndex,
end_id: Token.Id,
- pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: var) !void {
+ pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: anytype) !void {
const actual_token = tokens[self.token];
return stream.print("expected ',' or '{}', found '{}'", .{
self.end_id.symbol(),
@@ -375,7 +375,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void {
const actual_token = tokens[self.token];
return stream.print(msg, .{actual_token.symbol()});
}
@@ -388,7 +388,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void {
return stream.writeAll(msg);
}
};
@@ -396,9 +396,9 @@ pub const Error = union(enum) {
};
pub const Node = struct {
- id: Id,
+ tag: Tag,
- pub const Id = enum {
+ pub const Tag = enum {
// Top level
Root,
Use,
@@ -408,9 +408,69 @@ pub const Node = struct {
VarDecl,
Defer,
- // Operators
- InfixOp,
- PrefixOp,
+ // Infix operators
+ Catch,
+
+ // SimpleInfixOp
+ Add,
+ AddWrap,
+ ArrayCat,
+ ArrayMult,
+ Assign,
+ AssignBitAnd,
+ AssignBitOr,
+ AssignBitShiftLeft,
+ AssignBitShiftRight,
+ AssignBitXor,
+ AssignDiv,
+ AssignSub,
+ AssignSubWrap,
+ AssignMod,
+ AssignAdd,
+ AssignAddWrap,
+ AssignMul,
+ AssignMulWrap,
+ BangEqual,
+ BitAnd,
+ BitOr,
+ BitShiftLeft,
+ BitShiftRight,
+ BitXor,
+ BoolAnd,
+ BoolOr,
+ Div,
+ EqualEqual,
+ ErrorUnion,
+ GreaterOrEqual,
+ GreaterThan,
+ LessOrEqual,
+ LessThan,
+ MergeErrorSets,
+ Mod,
+ Mul,
+ MulWrap,
+ Period,
+ Range,
+ Sub,
+ SubWrap,
+ UnwrapOptional,
+
+ // SimplePrefixOp
+ AddressOf,
+ Await,
+ BitNot,
+ BoolNot,
+ OptionalType,
+ Negation,
+ NegationWrap,
+ Resume,
+ Try,
+
+ ArrayType,
+ /// ArrayType but has a sentinel node.
+ ArrayTypeSentinel,
+ PtrType,
+ SliceType,
/// Not all suffix operations are under this tag. To save memory, some
/// suffix operations have dedicated Node tags.
SuffixOp,
@@ -434,7 +494,7 @@ pub const Node = struct {
Suspend,
// Type expressions
- VarType,
+ AnyType,
ErrorType,
FnProto,
AnyFrameType,
@@ -471,49 +531,177 @@ pub const Node = struct {
ContainerField,
ErrorTag,
FieldInitializer,
+
+ pub fn Type(tag: Tag) type {
+ return switch (tag) {
+ .Root => Root,
+ .Use => Use,
+ .TestDecl => TestDecl,
+ .VarDecl => VarDecl,
+ .Defer => Defer,
+ .Catch => Catch,
+
+ .Add,
+ .AddWrap,
+ .ArrayCat,
+ .ArrayMult,
+ .Assign,
+ .AssignBitAnd,
+ .AssignBitOr,
+ .AssignBitShiftLeft,
+ .AssignBitShiftRight,
+ .AssignBitXor,
+ .AssignDiv,
+ .AssignSub,
+ .AssignSubWrap,
+ .AssignMod,
+ .AssignAdd,
+ .AssignAddWrap,
+ .AssignMul,
+ .AssignMulWrap,
+ .BangEqual,
+ .BitAnd,
+ .BitOr,
+ .BitShiftLeft,
+ .BitShiftRight,
+ .BitXor,
+ .BoolAnd,
+ .BoolOr,
+ .Div,
+ .EqualEqual,
+ .ErrorUnion,
+ .GreaterOrEqual,
+ .GreaterThan,
+ .LessOrEqual,
+ .LessThan,
+ .MergeErrorSets,
+ .Mod,
+ .Mul,
+ .MulWrap,
+ .Period,
+ .Range,
+ .Sub,
+ .SubWrap,
+ .UnwrapOptional,
+ => SimpleInfixOp,
+
+ .AddressOf,
+ .Await,
+ .BitNot,
+ .BoolNot,
+ .OptionalType,
+ .Negation,
+ .NegationWrap,
+ .Resume,
+ .Try,
+ => SimplePrefixOp,
+
+ .ArrayType => ArrayType,
+ .ArrayTypeSentinel => ArrayTypeSentinel,
+
+ .PtrType => PtrType,
+ .SliceType => SliceType,
+ .SuffixOp => SuffixOp,
+
+ .ArrayInitializer => ArrayInitializer,
+ .ArrayInitializerDot => ArrayInitializerDot,
+
+ .StructInitializer => StructInitializer,
+ .StructInitializerDot => StructInitializerDot,
+
+ .Call => Call,
+ .Switch => Switch,
+ .While => While,
+ .For => For,
+ .If => If,
+ .ControlFlowExpression => ControlFlowExpression,
+ .Suspend => Suspend,
+ .AnyType => AnyType,
+ .ErrorType => ErrorType,
+ .FnProto => FnProto,
+ .AnyFrameType => AnyFrameType,
+ .IntegerLiteral => IntegerLiteral,
+ .FloatLiteral => FloatLiteral,
+ .EnumLiteral => EnumLiteral,
+ .StringLiteral => StringLiteral,
+ .MultilineStringLiteral => MultilineStringLiteral,
+ .CharLiteral => CharLiteral,
+ .BoolLiteral => BoolLiteral,
+ .NullLiteral => NullLiteral,
+ .UndefinedLiteral => UndefinedLiteral,
+ .Unreachable => Unreachable,
+ .Identifier => Identifier,
+ .GroupedExpression => GroupedExpression,
+ .BuiltinCall => BuiltinCall,
+ .ErrorSetDecl => ErrorSetDecl,
+ .ContainerDecl => ContainerDecl,
+ .Asm => Asm,
+ .Comptime => Comptime,
+ .Nosuspend => Nosuspend,
+ .Block => Block,
+ .DocComment => DocComment,
+ .SwitchCase => SwitchCase,
+ .SwitchElse => SwitchElse,
+ .Else => Else,
+ .Payload => Payload,
+ .PointerPayload => PointerPayload,
+ .PointerIndexPayload => PointerIndexPayload,
+ .ContainerField => ContainerField,
+ .ErrorTag => ErrorTag,
+ .FieldInitializer => FieldInitializer,
+ };
+ }
};
+ /// Prefer `castTag` to this.
pub fn cast(base: *Node, comptime T: type) ?*T {
- if (base.id == comptime typeToId(T)) {
- return @fieldParentPtr(T, "base", base);
+ if (std.meta.fieldInfo(T, "base").default_value) |default_base| {
+ return base.castTag(default_base.tag);
+ }
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ const tag = @intToEnum(Tag, field.value);
+ if (base.tag == tag) {
+ if (T == tag.Type()) {
+ return @fieldParentPtr(T, "base", base);
+ }
+ return null;
+ }
+ }
+ unreachable;
+ }
+
+ pub fn castTag(base: *Node, comptime tag: Tag) ?*tag.Type() {
+ if (base.tag == tag) {
+ return @fieldParentPtr(tag.Type(), "base", base);
}
return null;
}
pub fn iterate(base: *Node, index: usize) ?*Node {
- inline for (@typeInfo(Id).Enum.fields) |f| {
- if (base.id == @field(Id, f.name)) {
- const T = @field(Node, f.name);
- return @fieldParentPtr(T, "base", base).iterate(index);
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ const tag = @intToEnum(Tag, field.value);
+ if (base.tag == tag) {
+ return @fieldParentPtr(tag.Type(), "base", base).iterate(index);
}
}
unreachable;
}
pub fn firstToken(base: *const Node) TokenIndex {
- inline for (@typeInfo(Id).Enum.fields) |f| {
- if (base.id == @field(Id, f.name)) {
- const T = @field(Node, f.name);
- return @fieldParentPtr(T, "base", base).firstToken();
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ const tag = @intToEnum(Tag, field.value);
+ if (base.tag == tag) {
+ return @fieldParentPtr(tag.Type(), "base", base).firstToken();
}
}
unreachable;
}
pub fn lastToken(base: *const Node) TokenIndex {
- inline for (@typeInfo(Id).Enum.fields) |f| {
- if (base.id == @field(Id, f.name)) {
- const T = @field(Node, f.name);
- return @fieldParentPtr(T, "base", base).lastToken();
- }
- }
- unreachable;
- }
-
- pub fn typeToId(comptime T: type) Id {
- inline for (@typeInfo(Id).Enum.fields) |f| {
- if (T == @field(Node, f.name)) {
- return @field(Id, f.name);
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ const tag = @intToEnum(Tag, field.value);
+ if (base.tag == tag) {
+ return @fieldParentPtr(tag.Type(), "base", base).lastToken();
}
}
unreachable;
@@ -522,7 +710,7 @@ pub const Node = struct {
pub fn requireSemiColon(base: *const Node) bool {
var n = base;
while (true) {
- switch (n.id) {
+ switch (n.tag) {
.Root,
.ContainerField,
.Block,
@@ -543,7 +731,7 @@ pub const Node = struct {
continue;
}
- return while_node.body.id != .Block;
+ return while_node.body.tag != .Block;
},
.For => {
const for_node = @fieldParentPtr(For, "base", n);
@@ -552,7 +740,7 @@ pub const Node = struct {
continue;
}
- return for_node.body.id != .Block;
+ return for_node.body.tag != .Block;
},
.If => {
const if_node = @fieldParentPtr(If, "base", n);
@@ -561,7 +749,7 @@ pub const Node = struct {
continue;
}
- return if_node.body.id != .Block;
+ return if_node.body.tag != .Block;
},
.Else => {
const else_node = @fieldParentPtr(Else, "base", n);
@@ -570,23 +758,23 @@ pub const Node = struct {
},
.Defer => {
const defer_node = @fieldParentPtr(Defer, "base", n);
- return defer_node.expr.id != .Block;
+ return defer_node.expr.tag != .Block;
},
.Comptime => {
const comptime_node = @fieldParentPtr(Comptime, "base", n);
- return comptime_node.expr.id != .Block;
+ return comptime_node.expr.tag != .Block;
},
.Suspend => {
const suspend_node = @fieldParentPtr(Suspend, "base", n);
if (suspend_node.body) |body| {
- return body.id != .Block;
+ return body.tag != .Block;
}
return true;
},
.Nosuspend => {
const nosuspend_node = @fieldParentPtr(Nosuspend, "base", n);
- return nosuspend_node.expr.id != .Block;
+ return nosuspend_node.expr.tag != .Block;
},
else => return true,
}
@@ -600,7 +788,7 @@ pub const Node = struct {
std.debug.warn(" ", .{});
}
}
- std.debug.warn("{}\n", .{@tagName(self.id)});
+ std.debug.warn("{}\n", .{@tagName(self.tag)});
var child_i: usize = 0;
while (self.iterate(child_i)) |child| : (child_i += 1) {
@@ -610,7 +798,7 @@ pub const Node = struct {
/// The decls data follows this struct in memory as an array of Node pointers.
pub const Root = struct {
- base: Node = Node{ .id = .Root },
+ base: Node = Node{ .tag = .Root },
eof_token: TokenIndex,
decls_len: NodeIndex,
@@ -662,42 +850,84 @@ pub const Node = struct {
}
};
+ /// Trailed in memory by possibly many things, with each optional thing
+ /// determined by a bit in `trailer_flags`.
pub const VarDecl = struct {
- base: Node = Node{ .id = .VarDecl },
- doc_comments: ?*DocComment,
- visib_token: ?TokenIndex,
- thread_local_token: ?TokenIndex,
- name_token: TokenIndex,
- eq_token: ?TokenIndex,
+ base: Node = Node{ .tag = .VarDecl },
+ trailer_flags: TrailerFlags,
mut_token: TokenIndex,
- comptime_token: ?TokenIndex,
- extern_export_token: ?TokenIndex,
- lib_name: ?*Node,
- type_node: ?*Node,
- align_node: ?*Node,
- section_node: ?*Node,
- init_node: ?*Node,
+ name_token: TokenIndex,
semicolon_token: TokenIndex,
+ pub const TrailerFlags = std.meta.TrailerFlags(struct {
+ doc_comments: *DocComment,
+ visib_token: TokenIndex,
+ thread_local_token: TokenIndex,
+ eq_token: TokenIndex,
+ comptime_token: TokenIndex,
+ extern_export_token: TokenIndex,
+ lib_name: *Node,
+ type_node: *Node,
+ align_node: *Node,
+ section_node: *Node,
+ init_node: *Node,
+ });
+
+ pub const RequiredFields = struct {
+ mut_token: TokenIndex,
+ name_token: TokenIndex,
+ semicolon_token: TokenIndex,
+ };
+
+ pub fn getTrailer(self: *const VarDecl, comptime name: []const u8) ?TrailerFlags.Field(name) {
+ const trailers_start = @ptrCast([*]const u8, self) + @sizeOf(VarDecl);
+ return self.trailer_flags.get(trailers_start, name);
+ }
+
+ pub fn setTrailer(self: *VarDecl, comptime name: []const u8, value: TrailerFlags.Field(name)) void {
+ const trailers_start = @ptrCast([*]u8, self) + @sizeOf(VarDecl);
+ self.trailer_flags.set(trailers_start, name, value);
+ }
+
+ pub fn create(allocator: *mem.Allocator, required: RequiredFields, trailers: anytype) !*VarDecl {
+ const trailer_flags = TrailerFlags.init(trailers);
+ const bytes = try allocator.alignedAlloc(u8, @alignOf(VarDecl), sizeInBytes(trailer_flags));
+ const var_decl = @ptrCast(*VarDecl, bytes.ptr);
+ var_decl.* = .{
+ .trailer_flags = trailer_flags,
+ .mut_token = required.mut_token,
+ .name_token = required.name_token,
+ .semicolon_token = required.semicolon_token,
+ };
+ const trailers_start = bytes.ptr + @sizeOf(VarDecl);
+ trailer_flags.setMany(trailers_start, trailers);
+ return var_decl;
+ }
+
+ pub fn destroy(self: *VarDecl, allocator: *mem.Allocator) void {
+ const bytes = @ptrCast([*]u8, self)[0..sizeInBytes(self.trailer_flags)];
+ allocator.free(bytes);
+ }
+
pub fn iterate(self: *const VarDecl, index: usize) ?*Node {
var i = index;
- if (self.type_node) |type_node| {
+ if (self.getTrailer("type_node")) |type_node| {
if (i < 1) return type_node;
i -= 1;
}
- if (self.align_node) |align_node| {
+ if (self.getTrailer("align_node")) |align_node| {
if (i < 1) return align_node;
i -= 1;
}
- if (self.section_node) |section_node| {
+ if (self.getTrailer("section_node")) |section_node| {
if (i < 1) return section_node;
i -= 1;
}
- if (self.init_node) |init_node| {
+ if (self.getTrailer("init_node")) |init_node| {
if (i < 1) return init_node;
i -= 1;
}
@@ -706,21 +936,25 @@ pub const Node = struct {
}
pub fn firstToken(self: *const VarDecl) TokenIndex {
- if (self.visib_token) |visib_token| return visib_token;
- if (self.thread_local_token) |thread_local_token| return thread_local_token;
- if (self.comptime_token) |comptime_token| return comptime_token;
- if (self.extern_export_token) |extern_export_token| return extern_export_token;
- assert(self.lib_name == null);
+ if (self.getTrailer("visib_token")) |visib_token| return visib_token;
+ if (self.getTrailer("thread_local_token")) |thread_local_token| return thread_local_token;
+ if (self.getTrailer("comptime_token")) |comptime_token| return comptime_token;
+ if (self.getTrailer("extern_export_token")) |extern_export_token| return extern_export_token;
+ assert(self.getTrailer("lib_name") == null);
return self.mut_token;
}
pub fn lastToken(self: *const VarDecl) TokenIndex {
return self.semicolon_token;
}
+
+ fn sizeInBytes(trailer_flags: TrailerFlags) usize {
+ return @sizeOf(VarDecl) + trailer_flags.sizeInBytes();
+ }
};
pub const Use = struct {
- base: Node = Node{ .id = .Use },
+ base: Node = Node{ .tag = .Use },
doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
use_token: TokenIndex,
@@ -747,7 +981,7 @@ pub const Node = struct {
};
pub const ErrorSetDecl = struct {
- base: Node = Node{ .id = .ErrorSetDecl },
+ base: Node = Node{ .tag = .ErrorSetDecl },
error_token: TokenIndex,
rbrace_token: TokenIndex,
decls_len: NodeIndex,
@@ -797,7 +1031,7 @@ pub const Node = struct {
/// The fields and decls Node pointers directly follow this struct in memory.
pub const ContainerDecl = struct {
- base: Node = Node{ .id = .ContainerDecl },
+ base: Node = Node{ .tag = .ContainerDecl },
kind_token: TokenIndex,
layout_token: ?TokenIndex,
lbrace_token: TokenIndex,
@@ -866,7 +1100,7 @@ pub const Node = struct {
};
pub const ContainerField = struct {
- base: Node = Node{ .id = .ContainerField },
+ base: Node = Node{ .tag = .ContainerField },
doc_comments: ?*DocComment,
comptime_token: ?TokenIndex,
name_token: TokenIndex,
@@ -917,7 +1151,7 @@ pub const Node = struct {
};
pub const ErrorTag = struct {
- base: Node = Node{ .id = .ErrorTag },
+ base: Node = Node{ .tag = .ErrorTag },
doc_comments: ?*DocComment,
name_token: TokenIndex,
@@ -942,7 +1176,7 @@ pub const Node = struct {
};
pub const Identifier = struct {
- base: Node = Node{ .id = .Identifier },
+ base: Node = Node{ .tag = .Identifier },
token: TokenIndex,
pub fn iterate(self: *const Identifier, index: usize) ?*Node {
@@ -959,23 +1193,34 @@ pub const Node = struct {
};
/// The params are directly after the FnProto in memory.
+ /// Next, each optional thing determined by a bit in `trailer_flags`.
pub const FnProto = struct {
- base: Node = Node{ .id = .FnProto },
- doc_comments: ?*DocComment,
- visib_token: ?TokenIndex,
+ base: Node = Node{ .tag = .FnProto },
+ trailer_flags: TrailerFlags,
fn_token: TokenIndex,
- name_token: ?TokenIndex,
params_len: NodeIndex,
return_type: ReturnType,
- var_args_token: ?TokenIndex,
- extern_export_inline_token: ?TokenIndex,
- body_node: ?*Node,
- lib_name: ?*Node, // populated if this is an extern declaration
- align_expr: ?*Node, // populated if align(A) is present
- section_expr: ?*Node, // populated if linksection(A) is present
- callconv_expr: ?*Node, // populated if callconv(A) is present
- is_extern_prototype: bool = false, // TODO: Remove once extern fn rewriting is
- is_async: bool = false, // TODO: remove once async fn rewriting is
+
+ pub const TrailerFlags = std.meta.TrailerFlags(struct {
+ doc_comments: *DocComment,
+ body_node: *Node,
+ lib_name: *Node, // populated if this is an extern declaration
+ align_expr: *Node, // populated if align(A) is present
+ section_expr: *Node, // populated if linksection(A) is present
+ callconv_expr: *Node, // populated if callconv(A) is present
+ visib_token: TokenIndex,
+ name_token: TokenIndex,
+ var_args_token: TokenIndex,
+ extern_export_inline_token: TokenIndex,
+ is_extern_prototype: void, // TODO: Remove once extern fn rewriting is
+ is_async: void, // TODO: remove once async fn rewriting is
+ });
+
+ pub const RequiredFields = struct {
+ fn_token: TokenIndex,
+ params_len: NodeIndex,
+ return_type: ReturnType,
+ };
pub const ReturnType = union(enum) {
Explicit: *Node,
@@ -991,8 +1236,7 @@ pub const Node = struct {
param_type: ParamType,
pub const ParamType = union(enum) {
- var_type: *Node,
- var_args: TokenIndex,
+ any_type: *Node,
type_expr: *Node,
};
@@ -1001,8 +1245,7 @@ pub const Node = struct {
if (i < 1) {
switch (self.param_type) {
- .var_args => return null,
- .var_type, .type_expr => |node| return node,
+ .any_type, .type_expr => |node| return node,
}
}
i -= 1;
@@ -1015,34 +1258,79 @@ pub const Node = struct {
if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token;
switch (self.param_type) {
- .var_args => |tok| return tok,
- .var_type, .type_expr => |node| return node.firstToken(),
+ .any_type, .type_expr => |node| return node.firstToken(),
}
}
pub fn lastToken(self: *const ParamDecl) TokenIndex {
switch (self.param_type) {
- .var_args => |tok| return tok,
- .var_type, .type_expr => |node| return node.lastToken(),
+ .any_type, .type_expr => |node| return node.lastToken(),
}
}
};
+ /// For debugging purposes.
+ pub fn dump(self: *const FnProto) void {
+ const trailers_start = @alignCast(
+ @alignOf(ParamDecl),
+ @ptrCast([*]const u8, self) + @sizeOf(FnProto) + @sizeOf(ParamDecl) * self.params_len,
+ );
+ std.debug.print("{*} flags: {b} name_token: {} {*} params_len: {}\n", .{
+ self,
+ self.trailer_flags.bits,
+ self.getTrailer("name_token"),
+ self.trailer_flags.ptrConst(trailers_start, "name_token"),
+ self.params_len,
+ });
+ }
+
+ pub fn getTrailer(self: *const FnProto, comptime name: []const u8) ?TrailerFlags.Field(name) {
+ const trailers_start = @alignCast(
+ @alignOf(ParamDecl),
+ @ptrCast([*]const u8, self) + @sizeOf(FnProto) + @sizeOf(ParamDecl) * self.params_len,
+ );
+ return self.trailer_flags.get(trailers_start, name);
+ }
+
+ pub fn setTrailer(self: *FnProto, comptime name: []const u8, value: TrailerFlags.Field(name)) void {
+ const trailers_start = @alignCast(
+ @alignOf(ParamDecl),
+ @ptrCast([*]u8, self) + @sizeOf(FnProto) + @sizeOf(ParamDecl) * self.params_len,
+ );
+ self.trailer_flags.set(trailers_start, name, value);
+ }
+
/// After this the caller must initialize the params list.
- pub fn alloc(allocator: *mem.Allocator, params_len: NodeIndex) !*FnProto {
- const bytes = try allocator.alignedAlloc(u8, @alignOf(FnProto), sizeInBytes(params_len));
- return @ptrCast(*FnProto, bytes.ptr);
+ pub fn create(allocator: *mem.Allocator, required: RequiredFields, trailers: anytype) !*FnProto {
+ const trailer_flags = TrailerFlags.init(trailers);
+ const bytes = try allocator.alignedAlloc(u8, @alignOf(FnProto), sizeInBytes(
+ required.params_len,
+ trailer_flags,
+ ));
+ const fn_proto = @ptrCast(*FnProto, bytes.ptr);
+ fn_proto.* = .{
+ .trailer_flags = trailer_flags,
+ .fn_token = required.fn_token,
+ .params_len = required.params_len,
+ .return_type = required.return_type,
+ };
+ const trailers_start = @alignCast(
+ @alignOf(ParamDecl),
+ bytes.ptr + @sizeOf(FnProto) + @sizeOf(ParamDecl) * required.params_len,
+ );
+ trailer_flags.setMany(trailers_start, trailers);
+ return fn_proto;
}
- pub fn free(self: *FnProto, allocator: *mem.Allocator) void {
- const bytes = @ptrCast([*]u8, self)[0..sizeInBytes(self.params_len)];
+ pub fn destroy(self: *FnProto, allocator: *mem.Allocator) void {
+ const bytes = @ptrCast([*]u8, self)[0..sizeInBytes(self.params_len, self.trailer_flags)];
allocator.free(bytes);
}
pub fn iterate(self: *const FnProto, index: usize) ?*Node {
var i = index;
- if (self.lib_name) |lib_name| {
+ if (self.getTrailer("lib_name")) |lib_name| {
if (i < 1) return lib_name;
i -= 1;
}
@@ -1050,24 +1338,22 @@ pub const Node = struct {
const params_len: usize = if (self.params_len == 0)
0
else switch (self.paramsConst()[self.params_len - 1].param_type) {
- .var_type, .type_expr => self.params_len,
- .var_args => self.params_len - 1,
+ .any_type, .type_expr => self.params_len,
};
if (i < params_len) {
switch (self.paramsConst()[i].param_type) {
- .var_type => |n| return n,
- .var_args => unreachable,
+ .any_type => |n| return n,
.type_expr => |n| return n,
}
}
i -= params_len;
- if (self.align_expr) |align_expr| {
+ if (self.getTrailer("align_expr")) |align_expr| {
if (i < 1) return align_expr;
i -= 1;
}
- if (self.section_expr) |section_expr| {
+ if (self.getTrailer("section_expr")) |section_expr| {
if (i < 1) return section_expr;
i -= 1;
}
@@ -1080,7 +1366,7 @@ pub const Node = struct {
.Invalid => {},
}
- if (self.body_node) |body_node| {
+ if (self.getTrailer("body_node")) |body_node| {
if (i < 1) return body_node;
i -= 1;
}
@@ -1089,14 +1375,14 @@ pub const Node = struct {
}
pub fn firstToken(self: *const FnProto) TokenIndex {
- if (self.visib_token) |visib_token| return visib_token;
- if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
- assert(self.lib_name == null);
+ if (self.getTrailer("visib_token")) |visib_token| return visib_token;
+ if (self.getTrailer("extern_export_inline_token")) |extern_export_inline_token| return extern_export_inline_token;
+ assert(self.getTrailer("lib_name") == null);
return self.fn_token;
}
pub fn lastToken(self: *const FnProto) TokenIndex {
- if (self.body_node) |body_node| return body_node.lastToken();
+ if (self.getTrailer("body_node")) |body_node| return body_node.lastToken();
switch (self.return_type) {
.Explicit, .InferErrorSet => |node| return node.lastToken(),
.Invalid => |tok| return tok,
@@ -1104,22 +1390,22 @@ pub const Node = struct {
}
pub fn params(self: *FnProto) []ParamDecl {
- const decls_start = @ptrCast([*]u8, self) + @sizeOf(FnProto);
- return @ptrCast([*]ParamDecl, decls_start)[0..self.params_len];
+ const params_start = @ptrCast([*]u8, self) + @sizeOf(FnProto);
+ return @ptrCast([*]ParamDecl, params_start)[0..self.params_len];
}
pub fn paramsConst(self: *const FnProto) []const ParamDecl {
- const decls_start = @ptrCast([*]const u8, self) + @sizeOf(FnProto);
- return @ptrCast([*]const ParamDecl, decls_start)[0..self.params_len];
+ const params_start = @ptrCast([*]const u8, self) + @sizeOf(FnProto);
+ return @ptrCast([*]const ParamDecl, params_start)[0..self.params_len];
}
- fn sizeInBytes(params_len: NodeIndex) usize {
- return @sizeOf(FnProto) + @sizeOf(ParamDecl) * @as(usize, params_len);
+ fn sizeInBytes(params_len: NodeIndex, trailer_flags: TrailerFlags) usize {
+ return @sizeOf(FnProto) + @sizeOf(ParamDecl) * @as(usize, params_len) + trailer_flags.sizeInBytes();
}
};
pub const AnyFrameType = struct {
- base: Node = Node{ .id = .AnyFrameType },
+ base: Node = Node{ .tag = .AnyFrameType },
anyframe_token: TokenIndex,
result: ?Result,
@@ -1151,7 +1437,7 @@ pub const Node = struct {
/// The statements of the block follow Block directly in memory.
pub const Block = struct {
- base: Node = Node{ .id = .Block },
+ base: Node = Node{ .tag = .Block },
statements_len: NodeIndex,
lbrace: TokenIndex,
rbrace: TokenIndex,
@@ -1205,7 +1491,7 @@ pub const Node = struct {
};
pub const Defer = struct {
- base: Node = Node{ .id = .Defer },
+ base: Node = Node{ .tag = .Defer },
defer_token: TokenIndex,
payload: ?*Node,
expr: *Node,
@@ -1229,7 +1515,7 @@ pub const Node = struct {
};
pub const Comptime = struct {
- base: Node = Node{ .id = .Comptime },
+ base: Node = Node{ .tag = .Comptime },
doc_comments: ?*DocComment,
comptime_token: TokenIndex,
expr: *Node,
@@ -1253,7 +1539,7 @@ pub const Node = struct {
};
pub const Nosuspend = struct {
- base: Node = Node{ .id = .Nosuspend },
+ base: Node = Node{ .tag = .Nosuspend },
nosuspend_token: TokenIndex,
expr: *Node,
@@ -1276,7 +1562,7 @@ pub const Node = struct {
};
pub const Payload = struct {
- base: Node = Node{ .id = .Payload },
+ base: Node = Node{ .tag = .Payload },
lpipe: TokenIndex,
error_symbol: *Node,
rpipe: TokenIndex,
@@ -1300,7 +1586,7 @@ pub const Node = struct {
};
pub const PointerPayload = struct {
- base: Node = Node{ .id = .PointerPayload },
+ base: Node = Node{ .tag = .PointerPayload },
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
value_symbol: *Node,
@@ -1325,7 +1611,7 @@ pub const Node = struct {
};
pub const PointerIndexPayload = struct {
- base: Node = Node{ .id = .PointerIndexPayload },
+ base: Node = Node{ .tag = .PointerIndexPayload },
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
value_symbol: *Node,
@@ -1356,7 +1642,7 @@ pub const Node = struct {
};
pub const Else = struct {
- base: Node = Node{ .id = .Else },
+ base: Node = Node{ .tag = .Else },
else_token: TokenIndex,
payload: ?*Node,
body: *Node,
@@ -1387,7 +1673,7 @@ pub const Node = struct {
/// The cases node pointers are found in memory after Switch.
/// They must be SwitchCase or SwitchElse nodes.
pub const Switch = struct {
- base: Node = Node{ .id = .Switch },
+ base: Node = Node{ .tag = .Switch },
switch_token: TokenIndex,
rbrace: TokenIndex,
cases_len: NodeIndex,
@@ -1441,7 +1727,7 @@ pub const Node = struct {
/// Items sub-nodes appear in memory directly following SwitchCase.
pub const SwitchCase = struct {
- base: Node = Node{ .id = .SwitchCase },
+ base: Node = Node{ .tag = .SwitchCase },
arrow_token: TokenIndex,
payload: ?*Node,
expr: *Node,
@@ -1499,7 +1785,7 @@ pub const Node = struct {
};
pub const SwitchElse = struct {
- base: Node = Node{ .id = .SwitchElse },
+ base: Node = Node{ .tag = .SwitchElse },
token: TokenIndex,
pub fn iterate(self: *const SwitchElse, index: usize) ?*Node {
@@ -1516,7 +1802,7 @@ pub const Node = struct {
};
pub const While = struct {
- base: Node = Node{ .id = .While },
+ base: Node = Node{ .tag = .While },
label: ?TokenIndex,
inline_token: ?TokenIndex,
while_token: TokenIndex,
@@ -1575,7 +1861,7 @@ pub const Node = struct {
};
pub const For = struct {
- base: Node = Node{ .id = .For },
+ base: Node = Node{ .tag = .For },
label: ?TokenIndex,
inline_token: ?TokenIndex,
for_token: TokenIndex,
@@ -1626,7 +1912,7 @@ pub const Node = struct {
};
pub const If = struct {
- base: Node = Node{ .id = .If },
+ base: Node = Node{ .tag = .If },
if_token: TokenIndex,
condition: *Node,
payload: ?*Node,
@@ -1668,116 +1954,22 @@ pub const Node = struct {
}
};
- pub const InfixOp = struct {
- base: Node = Node{ .id = .InfixOp },
+ pub const Catch = struct {
+ base: Node = Node{ .tag = .Catch },
op_token: TokenIndex,
lhs: *Node,
- op: Op,
rhs: *Node,
+ payload: ?*Node,
- pub const Op = union(enum) {
- Add,
- AddWrap,
- ArrayCat,
- ArrayMult,
- Assign,
- AssignBitAnd,
- AssignBitOr,
- AssignBitShiftLeft,
- AssignBitShiftRight,
- AssignBitXor,
- AssignDiv,
- AssignSub,
- AssignSubWrap,
- AssignMod,
- AssignAdd,
- AssignAddWrap,
- AssignMul,
- AssignMulWrap,
- BangEqual,
- BitAnd,
- BitOr,
- BitShiftLeft,
- BitShiftRight,
- BitXor,
- BoolAnd,
- BoolOr,
- Catch: ?*Node,
- Div,
- EqualEqual,
- ErrorUnion,
- GreaterOrEqual,
- GreaterThan,
- LessOrEqual,
- LessThan,
- MergeErrorSets,
- Mod,
- Mul,
- MulWrap,
- Period,
- Range,
- Sub,
- SubWrap,
- UnwrapOptional,
- };
-
- pub fn iterate(self: *const InfixOp, index: usize) ?*Node {
+ pub fn iterate(self: *const Catch, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
i -= 1;
- switch (self.op) {
- .Catch => |maybe_payload| {
- if (maybe_payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
- },
-
- .Add,
- .AddWrap,
- .ArrayCat,
- .ArrayMult,
- .Assign,
- .AssignBitAnd,
- .AssignBitOr,
- .AssignBitShiftLeft,
- .AssignBitShiftRight,
- .AssignBitXor,
- .AssignDiv,
- .AssignSub,
- .AssignSubWrap,
- .AssignMod,
- .AssignAdd,
- .AssignAddWrap,
- .AssignMul,
- .AssignMulWrap,
- .BangEqual,
- .BitAnd,
- .BitOr,
- .BitShiftLeft,
- .BitShiftRight,
- .BitXor,
- .BoolAnd,
- .BoolOr,
- .Div,
- .EqualEqual,
- .ErrorUnion,
- .GreaterOrEqual,
- .GreaterThan,
- .LessOrEqual,
- .LessThan,
- .MergeErrorSets,
- .Mod,
- .Mul,
- .MulWrap,
- .Period,
- .Range,
- .Sub,
- .SubWrap,
- .UnwrapOptional,
- => {},
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
}
if (i < 1) return self.rhs;
@@ -1786,94 +1978,140 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *const InfixOp) TokenIndex {
+ pub fn firstToken(self: *const Catch) TokenIndex {
return self.lhs.firstToken();
}
- pub fn lastToken(self: *const InfixOp) TokenIndex {
+ pub fn lastToken(self: *const Catch) TokenIndex {
return self.rhs.lastToken();
}
};
- pub const PrefixOp = struct {
- base: Node = Node{ .id = .PrefixOp },
+ pub const SimpleInfixOp = struct {
+ base: Node,
op_token: TokenIndex,
- op: Op,
+ lhs: *Node,
rhs: *Node,
- pub const Op = union(enum) {
- AddressOf,
- ArrayType: ArrayInfo,
- Await,
- BitNot,
- BoolNot,
- OptionalType,
- Negation,
- NegationWrap,
- Resume,
- PtrType: PtrInfo,
- SliceType: PtrInfo,
- Try,
- };
+ pub fn iterate(self: *const SimpleInfixOp, index: usize) ?*Node {
+ var i = index;
- pub const ArrayInfo = struct {
- len_expr: *Node,
- sentinel: ?*Node,
- };
+ if (i < 1) return self.lhs;
+ i -= 1;
- pub const PtrInfo = struct {
- allowzero_token: ?TokenIndex = null,
- align_info: ?Align = null,
- const_token: ?TokenIndex = null,
- volatile_token: ?TokenIndex = null,
- sentinel: ?*Node = null,
-
- pub const Align = struct {
- node: *Node,
- bit_range: ?BitRange,
-
- pub const BitRange = struct {
- start: *Node,
- end: *Node,
- };
- };
- };
+ if (i < 1) return self.rhs;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *const SimpleInfixOp) TokenIndex {
+ return self.lhs.firstToken();
+ }
+
+ pub fn lastToken(self: *const SimpleInfixOp) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const SimplePrefixOp = struct {
+ base: Node,
+ op_token: TokenIndex,
+ rhs: *Node,
+
+ const Self = @This();
+
+ pub fn iterate(self: *const Self, index: usize) ?*Node {
+ if (index == 0) return self.rhs;
+ return null;
+ }
- pub fn iterate(self: *const PrefixOp, index: usize) ?*Node {
+ pub fn firstToken(self: *const Self) TokenIndex {
+ return self.op_token;
+ }
+
+ pub fn lastToken(self: *const Self) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const ArrayType = struct {
+ base: Node = Node{ .tag = .ArrayType },
+ op_token: TokenIndex,
+ rhs: *Node,
+ len_expr: *Node,
+
+ pub fn iterate(self: *const ArrayType, index: usize) ?*Node {
var i = index;
- switch (self.op) {
- .PtrType, .SliceType => |addr_of_info| {
- if (addr_of_info.sentinel) |sentinel| {
- if (i < 1) return sentinel;
- i -= 1;
- }
+ if (i < 1) return self.len_expr;
+ i -= 1;
- if (addr_of_info.align_info) |align_info| {
- if (i < 1) return align_info.node;
- i -= 1;
- }
- },
+ if (i < 1) return self.rhs;
+ i -= 1;
- .ArrayType => |array_info| {
- if (i < 1) return array_info.len_expr;
- i -= 1;
- if (array_info.sentinel) |sentinel| {
- if (i < 1) return sentinel;
- i -= 1;
- }
- },
+ return null;
+ }
- .AddressOf,
- .Await,
- .BitNot,
- .BoolNot,
- .OptionalType,
- .Negation,
- .NegationWrap,
- .Try,
- .Resume,
- => {},
+ pub fn firstToken(self: *const ArrayType) TokenIndex {
+ return self.op_token;
+ }
+
+ pub fn lastToken(self: *const ArrayType) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const ArrayTypeSentinel = struct {
+ base: Node = Node{ .tag = .ArrayTypeSentinel },
+ op_token: TokenIndex,
+ rhs: *Node,
+ len_expr: *Node,
+ sentinel: *Node,
+
+ pub fn iterate(self: *const ArrayTypeSentinel, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.len_expr;
+ i -= 1;
+
+ if (i < 1) return self.sentinel;
+ i -= 1;
+
+ if (i < 1) return self.rhs;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *const ArrayTypeSentinel) TokenIndex {
+ return self.op_token;
+ }
+
+ pub fn lastToken(self: *const ArrayTypeSentinel) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const PtrType = struct {
+ base: Node = Node{ .tag = .PtrType },
+ op_token: TokenIndex,
+ rhs: *Node,
+ /// TODO Add a u8 flags field to Node where it would otherwise be padding, and each bit represents
+ /// one of these possibly-null things. Then we have them directly follow the PtrType in memory.
+ ptr_info: PtrInfo = .{},
+
+ pub fn iterate(self: *const PtrType, index: usize) ?*Node {
+ var i = index;
+
+ if (self.ptr_info.sentinel) |sentinel| {
+ if (i < 1) return sentinel;
+ i -= 1;
+ }
+
+ if (self.ptr_info.align_info) |align_info| {
+ if (i < 1) return align_info.node;
+ i -= 1;
}
if (i < 1) return self.rhs;
@@ -1882,17 +2120,53 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *const PrefixOp) TokenIndex {
+ pub fn firstToken(self: *const PtrType) TokenIndex {
return self.op_token;
}
- pub fn lastToken(self: *const PrefixOp) TokenIndex {
+ pub fn lastToken(self: *const PtrType) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const SliceType = struct {
+ base: Node = Node{ .tag = .SliceType },
+ op_token: TokenIndex,
+ rhs: *Node,
+ /// TODO Add a u8 flags field to Node where it would otherwise be padding, and each bit represents
+ /// one of these possibly-null things. Then we have them directly follow the SliceType in memory.
+ ptr_info: PtrInfo = .{},
+
+ pub fn iterate(self: *const SliceType, index: usize) ?*Node {
+ var i = index;
+
+ if (self.ptr_info.sentinel) |sentinel| {
+ if (i < 1) return sentinel;
+ i -= 1;
+ }
+
+ if (self.ptr_info.align_info) |align_info| {
+ if (i < 1) return align_info.node;
+ i -= 1;
+ }
+
+ if (i < 1) return self.rhs;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *const SliceType) TokenIndex {
+ return self.op_token;
+ }
+
+ pub fn lastToken(self: *const SliceType) TokenIndex {
return self.rhs.lastToken();
}
};
pub const FieldInitializer = struct {
- base: Node = Node{ .id = .FieldInitializer },
+ base: Node = Node{ .tag = .FieldInitializer },
period_token: TokenIndex,
name_token: TokenIndex,
expr: *Node,
@@ -1917,7 +2191,7 @@ pub const Node = struct {
/// Elements occur directly in memory after ArrayInitializer.
pub const ArrayInitializer = struct {
- base: Node = Node{ .id = .ArrayInitializer },
+ base: Node = Node{ .tag = .ArrayInitializer },
rtoken: TokenIndex,
list_len: NodeIndex,
lhs: *Node,
@@ -1970,7 +2244,7 @@ pub const Node = struct {
/// Elements occur directly in memory after ArrayInitializerDot.
pub const ArrayInitializerDot = struct {
- base: Node = Node{ .id = .ArrayInitializerDot },
+ base: Node = Node{ .tag = .ArrayInitializerDot },
dot: TokenIndex,
rtoken: TokenIndex,
list_len: NodeIndex,
@@ -2020,7 +2294,7 @@ pub const Node = struct {
/// Elements occur directly in memory after StructInitializer.
pub const StructInitializer = struct {
- base: Node = Node{ .id = .StructInitializer },
+ base: Node = Node{ .tag = .StructInitializer },
rtoken: TokenIndex,
list_len: NodeIndex,
lhs: *Node,
@@ -2073,7 +2347,7 @@ pub const Node = struct {
/// Elements occur directly in memory after StructInitializerDot.
pub const StructInitializerDot = struct {
- base: Node = Node{ .id = .StructInitializerDot },
+ base: Node = Node{ .tag = .StructInitializerDot },
dot: TokenIndex,
rtoken: TokenIndex,
list_len: NodeIndex,
@@ -2123,7 +2397,7 @@ pub const Node = struct {
/// Parameter nodes directly follow Call in memory.
pub const Call = struct {
- base: Node = Node{ .id = .Call },
+ base: Node = Node{ .tag = .Call },
lhs: *Node,
rtoken: TokenIndex,
params_len: NodeIndex,
@@ -2177,7 +2451,7 @@ pub const Node = struct {
};
pub const SuffixOp = struct {
- base: Node = Node{ .id = .SuffixOp },
+ base: Node = Node{ .tag = .SuffixOp },
op: Op,
lhs: *Node,
rtoken: TokenIndex,
@@ -2237,7 +2511,7 @@ pub const Node = struct {
};
pub const GroupedExpression = struct {
- base: Node = Node{ .id = .GroupedExpression },
+ base: Node = Node{ .tag = .GroupedExpression },
lparen: TokenIndex,
expr: *Node,
rparen: TokenIndex,
@@ -2260,8 +2534,10 @@ pub const Node = struct {
}
};
+ /// TODO break this into separate Break, Continue, Return AST Nodes to save memory.
+ /// Could be further broken into LabeledBreak, LabeledContinue, and ReturnVoid to save even more.
pub const ControlFlowExpression = struct {
- base: Node = Node{ .id = .ControlFlowExpression },
+ base: Node = Node{ .tag = .ControlFlowExpression },
ltoken: TokenIndex,
kind: Kind,
rhs: ?*Node,
@@ -2316,7 +2592,7 @@ pub const Node = struct {
};
pub const Suspend = struct {
- base: Node = Node{ .id = .Suspend },
+ base: Node = Node{ .tag = .Suspend },
suspend_token: TokenIndex,
body: ?*Node,
@@ -2345,7 +2621,7 @@ pub const Node = struct {
};
pub const IntegerLiteral = struct {
- base: Node = Node{ .id = .IntegerLiteral },
+ base: Node = Node{ .tag = .IntegerLiteral },
token: TokenIndex,
pub fn iterate(self: *const IntegerLiteral, index: usize) ?*Node {
@@ -2362,7 +2638,7 @@ pub const Node = struct {
};
pub const EnumLiteral = struct {
- base: Node = Node{ .id = .EnumLiteral },
+ base: Node = Node{ .tag = .EnumLiteral },
dot: TokenIndex,
name: TokenIndex,
@@ -2380,7 +2656,7 @@ pub const Node = struct {
};
pub const FloatLiteral = struct {
- base: Node = Node{ .id = .FloatLiteral },
+ base: Node = Node{ .tag = .FloatLiteral },
token: TokenIndex,
pub fn iterate(self: *const FloatLiteral, index: usize) ?*Node {
@@ -2398,7 +2674,7 @@ pub const Node = struct {
/// Parameters are in memory following BuiltinCall.
pub const BuiltinCall = struct {
- base: Node = Node{ .id = .BuiltinCall },
+ base: Node = Node{ .tag = .BuiltinCall },
params_len: NodeIndex,
builtin_token: TokenIndex,
rparen_token: TokenIndex,
@@ -2447,7 +2723,7 @@ pub const Node = struct {
};
pub const StringLiteral = struct {
- base: Node = Node{ .id = .StringLiteral },
+ base: Node = Node{ .tag = .StringLiteral },
token: TokenIndex,
pub fn iterate(self: *const StringLiteral, index: usize) ?*Node {
@@ -2465,7 +2741,7 @@ pub const Node = struct {
/// The string literal tokens appear directly in memory after MultilineStringLiteral.
pub const MultilineStringLiteral = struct {
- base: Node = Node{ .id = .MultilineStringLiteral },
+ base: Node = Node{ .tag = .MultilineStringLiteral },
lines_len: TokenIndex,
/// After this the caller must initialize the lines list.
@@ -2507,7 +2783,7 @@ pub const Node = struct {
};
pub const CharLiteral = struct {
- base: Node = Node{ .id = .CharLiteral },
+ base: Node = Node{ .tag = .CharLiteral },
token: TokenIndex,
pub fn iterate(self: *const CharLiteral, index: usize) ?*Node {
@@ -2524,7 +2800,7 @@ pub const Node = struct {
};
pub const BoolLiteral = struct {
- base: Node = Node{ .id = .BoolLiteral },
+ base: Node = Node{ .tag = .BoolLiteral },
token: TokenIndex,
pub fn iterate(self: *const BoolLiteral, index: usize) ?*Node {
@@ -2541,7 +2817,7 @@ pub const Node = struct {
};
pub const NullLiteral = struct {
- base: Node = Node{ .id = .NullLiteral },
+ base: Node = Node{ .tag = .NullLiteral },
token: TokenIndex,
pub fn iterate(self: *const NullLiteral, index: usize) ?*Node {
@@ -2558,7 +2834,7 @@ pub const Node = struct {
};
pub const UndefinedLiteral = struct {
- base: Node = Node{ .id = .UndefinedLiteral },
+ base: Node = Node{ .tag = .UndefinedLiteral },
token: TokenIndex,
pub fn iterate(self: *const UndefinedLiteral, index: usize) ?*Node {
@@ -2575,7 +2851,7 @@ pub const Node = struct {
};
pub const Asm = struct {
- base: Node = Node{ .id = .Asm },
+ base: Node = Node{ .tag = .Asm },
asm_token: TokenIndex,
rparen: TokenIndex,
volatile_token: ?TokenIndex,
@@ -2695,7 +2971,7 @@ pub const Node = struct {
};
pub const Unreachable = struct {
- base: Node = Node{ .id = .Unreachable },
+ base: Node = Node{ .tag = .Unreachable },
token: TokenIndex,
pub fn iterate(self: *const Unreachable, index: usize) ?*Node {
@@ -2712,7 +2988,7 @@ pub const Node = struct {
};
pub const ErrorType = struct {
- base: Node = Node{ .id = .ErrorType },
+ base: Node = Node{ .tag = .ErrorType },
token: TokenIndex,
pub fn iterate(self: *const ErrorType, index: usize) ?*Node {
@@ -2728,25 +3004,28 @@ pub const Node = struct {
}
};
- pub const VarType = struct {
- base: Node = Node{ .id = .VarType },
+ pub const AnyType = struct {
+ base: Node = Node{ .tag = .AnyType },
token: TokenIndex,
- pub fn iterate(self: *const VarType, index: usize) ?*Node {
+ pub fn iterate(self: *const AnyType, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: *const VarType) TokenIndex {
+ pub fn firstToken(self: *const AnyType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *const VarType) TokenIndex {
+ pub fn lastToken(self: *const AnyType) TokenIndex {
return self.token;
}
};
+ /// TODO remove from the Node base struct
+ /// TODO actually maybe remove entirely in favor of iterating backward from Node.firstToken()
+ /// and forwards to find same-line doc comments.
pub const DocComment = struct {
- base: Node = Node{ .id = .DocComment },
+ base: Node = Node{ .tag = .DocComment },
/// Points to the first doc comment token. API users are expected to iterate over the
/// tokens array, looking for more doc comments, ignoring line comments, and stopping
/// at the first other token.
@@ -2768,7 +3047,7 @@ pub const Node = struct {
};
pub const TestDecl = struct {
- base: Node = Node{ .id = .TestDecl },
+ base: Node = Node{ .tag = .TestDecl },
doc_comments: ?*DocComment,
test_token: TokenIndex,
name: *Node,
@@ -2793,9 +3072,27 @@ pub const Node = struct {
};
};
+pub const PtrInfo = struct {
+ allowzero_token: ?TokenIndex = null,
+ align_info: ?Align = null,
+ const_token: ?TokenIndex = null,
+ volatile_token: ?TokenIndex = null,
+ sentinel: ?*Node = null,
+
+ pub const Align = struct {
+ node: *Node,
+ bit_range: ?BitRange = null,
+
+ pub const BitRange = struct {
+ start: *Node,
+ end: *Node,
+ };
+ };
+};
+
test "iterate" {
var root = Node.Root{
- .base = Node{ .id = Node.Id.Root },
+ .base = Node{ .tag = Node.Tag.Root },
.decls_len = 0,
.eof_token = 0,
};
diff --git a/lib/std/zig/cross_target.zig b/lib/std/zig/cross_target.zig
index 1909a07df0..5466b39f0b 100644
--- a/lib/std/zig/cross_target.zig
+++ b/lib/std/zig/cross_target.zig
@@ -497,7 +497,7 @@ pub const CrossTarget = struct {
pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 {
if (self.isNative()) {
- return mem.dupe(allocator, u8, "native");
+ return allocator.dupe(u8, "native");
}
const arch_name = if (self.cpu_arch) |arch| @tagName(arch) else "native";
@@ -514,14 +514,14 @@ pub const CrossTarget = struct {
switch (self.getOsVersionMin()) {
.none => {},
.semver => |v| try result.outStream().print(".{}", .{v}),
- .windows => |v| try result.outStream().print(".{}", .{@tagName(v)}),
+ .windows => |v| try result.outStream().print("{s}", .{v}),
}
}
if (self.os_version_max) |max| {
switch (max) {
.none => {},
.semver => |v| try result.outStream().print("...{}", .{v}),
- .windows => |v| try result.outStream().print("...{}", .{@tagName(v)}),
+ .windows => |v| try result.outStream().print("..{s}", .{v}),
}
}
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 2a3ff9d9de..b02cdcc1fd 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -150,7 +150,7 @@ const Parser = struct {
const visib_token = p.eatToken(.Keyword_pub);
- if (p.parseTopLevelDecl() catch |err| switch (err) {
+ if (p.parseTopLevelDecl(doc_comments, visib_token) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ParseError => {
p.findNextContainerMember();
@@ -160,30 +160,7 @@ const Parser = struct {
if (field_state == .seen) {
field_state = .{ .end = visib_token orelse node.firstToken() };
}
- switch (node.id) {
- .FnProto => {
- node.cast(Node.FnProto).?.doc_comments = doc_comments;
- node.cast(Node.FnProto).?.visib_token = visib_token;
- },
- .VarDecl => {
- node.cast(Node.VarDecl).?.doc_comments = doc_comments;
- node.cast(Node.VarDecl).?.visib_token = visib_token;
- },
- .Use => {
- node.cast(Node.Use).?.doc_comments = doc_comments;
- node.cast(Node.Use).?.visib_token = visib_token;
- },
- else => unreachable,
- }
try list.append(node);
- if (try p.parseAppendedDocComment(node.lastToken())) |appended_comment| {
- switch (node.id) {
- .FnProto => {},
- .VarDecl => node.cast(Node.VarDecl).?.doc_comments = appended_comment,
- .Use => node.cast(Node.Use).?.doc_comments = appended_comment,
- else => unreachable,
- }
- }
continue;
}
@@ -417,7 +394,7 @@ const Parser = struct {
/// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
/// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
/// / KEYWORD_usingnamespace Expr SEMICOLON
- fn parseTopLevelDecl(p: *Parser) !?*Node {
+ fn parseTopLevelDecl(p: *Parser, doc_comments: ?*Node.DocComment, visib_token: ?TokenIndex) !?*Node {
var lib_name: ?*Node = null;
const extern_export_inline_token = blk: {
if (p.eatToken(.Keyword_export)) |token| break :blk token;
@@ -430,20 +407,12 @@ const Parser = struct {
break :blk null;
};
- if (try p.parseFnProto()) |node| {
- const fn_node = node.cast(Node.FnProto).?;
- fn_node.*.extern_export_inline_token = extern_export_inline_token;
- fn_node.*.lib_name = lib_name;
- if (p.eatToken(.Semicolon)) |_| return node;
-
- if (try p.expectNodeRecoverable(parseBlock, .{
- // since parseBlock only return error.ParseError on
- // a missing '}' we can assume this function was
- // supposed to end here.
- .ExpectedSemiOrLBrace = .{ .token = p.tok_i },
- })) |body_node| {
- fn_node.body_node = body_node;
- }
+ if (try p.parseFnProto(.top_level, .{
+ .doc_comments = doc_comments,
+ .visib_token = visib_token,
+ .extern_export_inline_token = extern_export_inline_token,
+ .lib_name = lib_name,
+ })) |node| {
return node;
}
@@ -460,12 +429,13 @@ const Parser = struct {
const thread_local_token = p.eatToken(.Keyword_threadlocal);
- if (try p.parseVarDecl()) |node| {
- var var_decl = node.cast(Node.VarDecl).?;
- var_decl.*.thread_local_token = thread_local_token;
- var_decl.*.comptime_token = null;
- var_decl.*.extern_export_token = extern_export_inline_token;
- var_decl.*.lib_name = lib_name;
+ if (try p.parseVarDecl(.{
+ .doc_comments = doc_comments,
+ .visib_token = visib_token,
+ .thread_local_token = thread_local_token,
+ .extern_export_token = extern_export_inline_token,
+ .lib_name = lib_name,
+ })) |node| {
return node;
}
@@ -485,21 +455,41 @@ const Parser = struct {
return error.ParseError;
}
- return p.parseUse();
+ const use_token = p.eatToken(.Keyword_usingnamespace) orelse return null;
+ const expr = try p.expectNode(parseExpr, .{
+ .ExpectedExpr = .{ .token = p.tok_i },
+ });
+ const semicolon_token = try p.expectToken(.Semicolon);
+
+ const node = try p.arena.allocator.create(Node.Use);
+ node.* = .{
+ .doc_comments = doc_comments orelse try p.parseAppendedDocComment(semicolon_token),
+ .visib_token = visib_token,
+ .use_token = use_token,
+ .expr = expr,
+ .semicolon_token = semicolon_token,
+ };
+
+ return &node.base;
}
- /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? EXCLAMATIONMARK? (KEYWORD_var / TypeExpr)
- fn parseFnProto(p: *Parser) !?*Node {
+ /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? EXCLAMATIONMARK? (Keyword_anytype / TypeExpr)
+ fn parseFnProto(p: *Parser, level: enum { top_level, as_type }, fields: struct {
+ doc_comments: ?*Node.DocComment = null,
+ visib_token: ?TokenIndex = null,
+ extern_export_inline_token: ?TokenIndex = null,
+ lib_name: ?*Node = null,
+ }) !?*Node {
// TODO: Remove once extern/async fn rewriting is
- var is_async = false;
- var is_extern = false;
+ var is_async: ?void = null;
+ var is_extern_prototype: ?void = null;
const cc_token: ?TokenIndex = blk: {
if (p.eatToken(.Keyword_extern)) |token| {
- is_extern = true;
+ is_extern_prototype = {};
break :blk token;
}
if (p.eatToken(.Keyword_async)) |token| {
- is_async = true;
+ is_async = {};
break :blk token;
}
break :blk null;
@@ -513,13 +503,14 @@ const Parser = struct {
const lparen = try p.expectToken(.LParen);
const params = try p.parseParamDeclList();
defer p.gpa.free(params);
+ const var_args_token = p.eatToken(.Ellipsis3);
const rparen = try p.expectToken(.RParen);
const align_expr = try p.parseByteAlign();
const section_expr = try p.parseLinkSection();
const callconv_expr = try p.parseCallconv();
const exclamation_token = p.eatToken(.Bang);
- const return_type_expr = (try p.parseVarType()) orelse
+ const return_type_expr = (try p.parseAnyType()) orelse
try p.expectNodeRecoverable(parseTypeExpr, .{
// most likely the user forgot to specify the return type.
// Mark return type as invalid and try to continue.
@@ -535,37 +526,53 @@ const Parser = struct {
else
R{ .Explicit = return_type_expr.? };
- const var_args_token = if (params.len > 0) blk: {
- const param_type = params[params.len - 1].param_type;
- break :blk if (param_type == .var_args) param_type.var_args else null;
- } else
- null;
+ const body_node: ?*Node = switch (level) {
+ .top_level => blk: {
+ if (p.eatToken(.Semicolon)) |_| {
+ break :blk null;
+ }
+ break :blk try p.expectNodeRecoverable(parseBlock, .{
+ // Since parseBlock only return error.ParseError on
+ // a missing '}' we can assume this function was
+ // supposed to end here.
+ .ExpectedSemiOrLBrace = .{ .token = p.tok_i },
+ });
+ },
+ .as_type => null,
+ };
- const fn_proto_node = try Node.FnProto.alloc(&p.arena.allocator, params.len);
- fn_proto_node.* = .{
- .doc_comments = null,
- .visib_token = null,
- .fn_token = fn_token,
- .name_token = name_token,
+ const fn_proto_node = try Node.FnProto.create(&p.arena.allocator, .{
.params_len = params.len,
+ .fn_token = fn_token,
.return_type = return_type,
+ }, .{
+ .doc_comments = fields.doc_comments,
+ .visib_token = fields.visib_token,
+ .name_token = name_token,
.var_args_token = var_args_token,
- .extern_export_inline_token = null,
- .body_node = null,
- .lib_name = null,
+ .extern_export_inline_token = fields.extern_export_inline_token,
+ .body_node = body_node,
+ .lib_name = fields.lib_name,
.align_expr = align_expr,
.section_expr = section_expr,
.callconv_expr = callconv_expr,
- .is_extern_prototype = is_extern,
+ .is_extern_prototype = is_extern_prototype,
.is_async = is_async,
- };
+ });
std.mem.copy(Node.FnProto.ParamDecl, fn_proto_node.params(), params);
return &fn_proto_node.base;
}
/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? SEMICOLON
- fn parseVarDecl(p: *Parser) !?*Node {
+ fn parseVarDecl(p: *Parser, fields: struct {
+ doc_comments: ?*Node.DocComment = null,
+ visib_token: ?TokenIndex = null,
+ thread_local_token: ?TokenIndex = null,
+ extern_export_token: ?TokenIndex = null,
+ lib_name: ?*Node = null,
+ comptime_token: ?TokenIndex = null,
+ }) !?*Node {
const mut_token = p.eatToken(.Keyword_const) orelse
p.eatToken(.Keyword_var) orelse
return null;
@@ -587,23 +594,25 @@ const Parser = struct {
} else null;
const semicolon_token = try p.expectToken(.Semicolon);
- const node = try p.arena.allocator.create(Node.VarDecl);
- node.* = .{
- .doc_comments = null,
- .visib_token = null,
- .thread_local_token = null,
+ const doc_comments = fields.doc_comments orelse try p.parseAppendedDocComment(semicolon_token);
+
+ const node = try Node.VarDecl.create(&p.arena.allocator, .{
+ .mut_token = mut_token,
.name_token = name_token,
+ .semicolon_token = semicolon_token,
+ }, .{
+ .doc_comments = doc_comments,
+ .visib_token = fields.visib_token,
+ .thread_local_token = fields.thread_local_token,
.eq_token = eq_token,
- .mut_token = mut_token,
- .comptime_token = null,
- .extern_export_token = null,
- .lib_name = null,
+ .comptime_token = fields.comptime_token,
+ .extern_export_token = fields.extern_export_token,
+ .lib_name = fields.lib_name,
.type_node = type_node,
.align_node = align_node,
.section_node = section_node,
.init_node = init_node,
- .semicolon_token = semicolon_token,
- };
+ });
return &node.base;
}
@@ -618,9 +627,9 @@ const Parser = struct {
var align_expr: ?*Node = null;
var type_expr: ?*Node = null;
if (p.eatToken(.Colon)) |_| {
- if (p.eatToken(.Keyword_var)) |var_tok| {
- const node = try p.arena.allocator.create(Node.VarType);
- node.* = .{ .token = var_tok };
+ if (p.eatToken(.Keyword_anytype) orelse p.eatToken(.Keyword_var)) |anytype_tok| {
+ const node = try p.arena.allocator.create(Node.AnyType);
+ node.* = .{ .token = anytype_tok };
type_expr = &node.base;
} else {
type_expr = try p.expectNode(parseTypeExpr, .{
@@ -663,10 +672,9 @@ const Parser = struct {
fn parseStatement(p: *Parser) Error!?*Node {
const comptime_token = p.eatToken(.Keyword_comptime);
- const var_decl_node = try p.parseVarDecl();
- if (var_decl_node) |node| {
- const var_decl = node.cast(Node.VarDecl).?;
- var_decl.comptime_token = comptime_token;
+ if (try p.parseVarDecl(.{
+ .comptime_token = comptime_token,
+ })) |node| {
return node;
}
@@ -937,7 +945,6 @@ const Parser = struct {
return node;
}
-
while_prefix.body = try p.expectNode(parseAssignExpr, .{
.ExpectedBlockOrAssignment = .{ .token = p.tok_i },
});
@@ -1008,7 +1015,7 @@ const Parser = struct {
/// BoolOrExpr <- BoolAndExpr (KEYWORD_or BoolAndExpr)*
fn parseBoolOrExpr(p: *Parser) !?*Node {
return p.parseBinOpExpr(
- SimpleBinOpParseFn(.Keyword_or, Node.InfixOp.Op.BoolOr),
+ SimpleBinOpParseFn(.Keyword_or, .BoolOr),
parseBoolAndExpr,
.Infinitely,
);
@@ -1121,10 +1128,10 @@ const Parser = struct {
const expr_node = try p.expectNode(parseExpr, .{
.ExpectedExpr = .{ .token = p.tok_i },
});
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.SimplePrefixOp);
node.* = .{
+ .base = .{ .tag = .Resume },
.op_token = token,
- .op = .Resume,
.rhs = expr_node,
};
return &node.base;
@@ -1398,8 +1405,8 @@ const Parser = struct {
fn parseErrorUnionExpr(p: *Parser) !?*Node {
const suffix_expr = (try p.parseSuffixExpr()) orelse return null;
- if (try SimpleBinOpParseFn(.Bang, Node.InfixOp.Op.ErrorUnion)(p)) |node| {
- const error_union = node.cast(Node.InfixOp).?;
+ if (try SimpleBinOpParseFn(.Bang, .ErrorUnion)(p)) |node| {
+ const error_union = node.castTag(.ErrorUnion).?;
const type_expr = try p.expectNode(parseTypeExpr, .{
.ExpectedTypeExpr = .{ .token = p.tok_i },
});
@@ -1432,10 +1439,56 @@ const Parser = struct {
.ExpectedPrimaryTypeExpr = .{ .token = p.tok_i },
});
+ // TODO pass `res` into `parseSuffixOp` rather than patching it up afterwards.
while (try p.parseSuffixOp()) |node| {
- switch (node.id) {
+ switch (node.tag) {
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = res,
- .InfixOp => node.cast(Node.InfixOp).?.lhs = res,
+ .Catch => node.castTag(.Catch).?.lhs = res,
+
+ .Add,
+ .AddWrap,
+ .ArrayCat,
+ .ArrayMult,
+ .Assign,
+ .AssignBitAnd,
+ .AssignBitOr,
+ .AssignBitShiftLeft,
+ .AssignBitShiftRight,
+ .AssignBitXor,
+ .AssignDiv,
+ .AssignSub,
+ .AssignSubWrap,
+ .AssignMod,
+ .AssignAdd,
+ .AssignAddWrap,
+ .AssignMul,
+ .AssignMulWrap,
+ .BangEqual,
+ .BitAnd,
+ .BitOr,
+ .BitShiftLeft,
+ .BitShiftRight,
+ .BitXor,
+ .BoolAnd,
+ .BoolOr,
+ .Div,
+ .EqualEqual,
+ .ErrorUnion,
+ .GreaterOrEqual,
+ .GreaterThan,
+ .LessOrEqual,
+ .LessThan,
+ .MergeErrorSets,
+ .Mod,
+ .Mul,
+ .MulWrap,
+ .Period,
+ .Range,
+ .Sub,
+ .SubWrap,
+ .UnwrapOptional,
+ => node.cast(Node.SimpleInfixOp).?.lhs = res,
+
else => unreachable,
}
res = node;
@@ -1463,10 +1516,55 @@ const Parser = struct {
var res = expr;
while (true) {
+ // TODO pass `res` into `parseSuffixOp` rather than patching it up afterwards.
if (try p.parseSuffixOp()) |node| {
- switch (node.id) {
+ switch (node.tag) {
.SuffixOp => node.cast(Node.SuffixOp).?.lhs = res,
- .InfixOp => node.cast(Node.InfixOp).?.lhs = res,
+ .Catch => node.castTag(.Catch).?.lhs = res,
+
+ .Add,
+ .AddWrap,
+ .ArrayCat,
+ .ArrayMult,
+ .Assign,
+ .AssignBitAnd,
+ .AssignBitOr,
+ .AssignBitShiftLeft,
+ .AssignBitShiftRight,
+ .AssignBitXor,
+ .AssignDiv,
+ .AssignSub,
+ .AssignSubWrap,
+ .AssignMod,
+ .AssignAdd,
+ .AssignAddWrap,
+ .AssignMul,
+ .AssignMulWrap,
+ .BangEqual,
+ .BitAnd,
+ .BitOr,
+ .BitShiftLeft,
+ .BitShiftRight,
+ .BitXor,
+ .BoolAnd,
+ .BoolOr,
+ .Div,
+ .EqualEqual,
+ .ErrorUnion,
+ .GreaterOrEqual,
+ .GreaterThan,
+ .LessOrEqual,
+ .LessThan,
+ .MergeErrorSets,
+ .Mod,
+ .Mul,
+ .MulWrap,
+ .Period,
+ .Range,
+ .Sub,
+ .SubWrap,
+ .UnwrapOptional,
+ => node.cast(Node.SimpleInfixOp).?.lhs = res,
else => unreachable,
}
res = node;
@@ -1529,7 +1627,7 @@ const Parser = struct {
if (try p.parseAnonLiteral()) |node| return node;
if (try p.parseErrorSetDecl()) |node| return node;
if (try p.parseFloatLiteral()) |node| return node;
- if (try p.parseFnProto()) |node| return node;
+ if (try p.parseFnProto(.as_type, .{})) |node| return node;
if (try p.parseGroupedExpr()) |node| return node;
if (try p.parseLabeledTypeExpr()) |node| return node;
if (try p.parseIdentifier()) |node| return node;
@@ -1553,11 +1651,11 @@ const Parser = struct {
const global_error_set = try p.createLiteral(Node.ErrorType, token);
if (period == null or identifier == null) return global_error_set;
- const node = try p.arena.allocator.create(Node.InfixOp);
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
+ .base = Node{ .tag = .Period },
.op_token = period.?,
.lhs = global_error_set,
- .op = .Period,
.rhs = identifier.?,
};
return &node.base;
@@ -1654,7 +1752,7 @@ const Parser = struct {
}
if (try p.parseLoopTypeExpr()) |node| {
- switch (node.id) {
+ switch (node.tag) {
.For => node.cast(Node.For).?.label = label,
.While => node.cast(Node.While).?.label = label,
else => unreachable,
@@ -2023,14 +2121,13 @@ const Parser = struct {
}
/// ParamType
- /// <- KEYWORD_var
+ /// <- Keyword_anytype
/// / DOT3
/// / TypeExpr
fn parseParamType(p: *Parser) !?Node.FnProto.ParamDecl.ParamType {
// TODO cast from tuple to error union is broken
const P = Node.FnProto.ParamDecl.ParamType;
- if (try p.parseVarType()) |node| return P{ .var_type = node };
- if (p.eatToken(.Ellipsis3)) |token| return P{ .var_args = token };
+ if (try p.parseAnyType()) |node| return P{ .any_type = node };
if (try p.parseTypeExpr()) |node| return P{ .type_expr = node };
return null;
}
@@ -2231,11 +2328,11 @@ const Parser = struct {
.ExpectedExpr = .{ .token = p.tok_i },
});
- const node = try p.arena.allocator.create(Node.InfixOp);
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
+ .base = Node{ .tag = .Range },
.op_token = token,
.lhs = expr,
- .op = .Range,
.rhs = range_end,
};
return &node.base;
@@ -2260,7 +2357,7 @@ const Parser = struct {
/// / EQUAL
fn parseAssignOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.AsteriskEqual => .AssignMul,
.SlashEqual => .AssignDiv,
.PercentEqual => .AssignMod,
@@ -2281,11 +2378,11 @@ const Parser = struct {
},
};
- const node = try p.arena.allocator.create(Node.InfixOp);
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
+ .base = .{ .tag = op },
.op_token = token,
.lhs = undefined, // set by caller
- .op = op,
.rhs = undefined, // set by caller
};
return &node.base;
@@ -2300,7 +2397,7 @@ const Parser = struct {
/// / RARROWEQUAL
fn parseCompareOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.EqualEqual => .EqualEqual,
.BangEqual => .BangEqual,
.AngleBracketLeft => .LessThan,
@@ -2324,12 +2421,22 @@ const Parser = struct {
/// / KEYWORD_catch Payload?
fn parseBitwiseOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.Ampersand => .BitAnd,
.Caret => .BitXor,
.Pipe => .BitOr,
.Keyword_orelse => .UnwrapOptional,
- .Keyword_catch => .{ .Catch = try p.parsePayload() },
+ .Keyword_catch => {
+ const payload = try p.parsePayload();
+ const node = try p.arena.allocator.create(Node.Catch);
+ node.* = .{
+ .op_token = token,
+ .lhs = undefined, // set by caller
+ .rhs = undefined, // set by caller
+ .payload = payload,
+ };
+ return &node.base;
+ },
else => {
p.putBackToken(token);
return null;
@@ -2344,7 +2451,7 @@ const Parser = struct {
/// / RARROW2
fn parseBitShiftOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.AngleBracketAngleBracketLeft => .BitShiftLeft,
.AngleBracketAngleBracketRight => .BitShiftRight,
else => {
@@ -2364,7 +2471,7 @@ const Parser = struct {
/// / MINUSPERCENT
fn parseAdditionOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.Plus => .Add,
.Minus => .Sub,
.PlusPlus => .ArrayCat,
@@ -2388,7 +2495,7 @@ const Parser = struct {
/// / ASTERISKPERCENT
fn parseMultiplyOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.InfixOp.Op = switch (p.token_ids[token]) {
+ const op: Node.Tag = switch (p.token_ids[token]) {
.PipePipe => .MergeErrorSets,
.Asterisk => .Mul,
.Slash => .Div,
@@ -2414,24 +2521,26 @@ const Parser = struct {
/// / KEYWORD_await
fn parsePrefixOp(p: *Parser) !?*Node {
const token = p.nextToken();
- const op: Node.PrefixOp.Op = switch (p.token_ids[token]) {
- .Bang => .BoolNot,
- .Minus => .Negation,
- .Tilde => .BitNot,
- .MinusPercent => .NegationWrap,
- .Ampersand => .AddressOf,
- .Keyword_try => .Try,
- .Keyword_await => .Await,
+ switch (p.token_ids[token]) {
+ .Bang => return p.allocSimplePrefixOp(.BoolNot, token),
+ .Minus => return p.allocSimplePrefixOp(.Negation, token),
+ .Tilde => return p.allocSimplePrefixOp(.BitNot, token),
+ .MinusPercent => return p.allocSimplePrefixOp(.NegationWrap, token),
+ .Ampersand => return p.allocSimplePrefixOp(.AddressOf, token),
+ .Keyword_try => return p.allocSimplePrefixOp(.Try, token),
+ .Keyword_await => return p.allocSimplePrefixOp(.Await, token),
else => {
p.putBackToken(token);
return null;
},
- };
+ }
+ }
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ fn allocSimplePrefixOp(p: *Parser, comptime tag: Node.Tag, token: TokenIndex) !?*Node {
+ const node = try p.arena.allocator.create(Node.SimplePrefixOp);
node.* = .{
+ .base = .{ .tag = tag },
.op_token = token,
- .op = op,
.rhs = undefined, // set by caller
};
return &node.base;
@@ -2451,19 +2560,15 @@ const Parser = struct {
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
fn parsePrefixTypeOp(p: *Parser) !?*Node {
if (p.eatToken(.QuestionMark)) |token| {
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.SimplePrefixOp);
node.* = .{
+ .base = .{ .tag = .OptionalType },
.op_token = token,
- .op = .OptionalType,
.rhs = undefined, // set by caller
};
return &node.base;
}
- // TODO: Returning a AnyFrameType instead of PrefixOp makes casting and setting .rhs or
- // .return_type more difficult for the caller (see parsePrefixOpExpr helper).
- // Consider making the AnyFrameType a member of PrefixOp and add a
- // PrefixOp.AnyFrameType variant?
if (p.eatToken(.Keyword_anyframe)) |token| {
const arrow = p.eatToken(.Arrow) orelse {
p.putBackToken(token);
@@ -2483,11 +2588,15 @@ const Parser = struct {
if (try p.parsePtrTypeStart()) |node| {
// If the token encountered was **, there will be two nodes instead of one.
// The attributes should be applied to the rightmost operator.
- const prefix_op = node.cast(Node.PrefixOp).?;
- var ptr_info = if (p.token_ids[prefix_op.op_token] == .AsteriskAsterisk)
- &prefix_op.rhs.cast(Node.PrefixOp).?.op.PtrType
+ var ptr_info = if (node.cast(Node.PtrType)) |ptr_type|
+ if (p.token_ids[ptr_type.op_token] == .AsteriskAsterisk)
+ &ptr_type.rhs.cast(Node.PtrType).?.ptr_info
+ else
+ &ptr_type.ptr_info
+ else if (node.cast(Node.SliceType)) |slice_type|
+ &slice_type.ptr_info
else
- &prefix_op.op.PtrType;
+ unreachable;
while (true) {
if (p.eatToken(.Keyword_align)) |align_token| {
@@ -2506,7 +2615,7 @@ const Parser = struct {
.ExpectedIntegerLiteral = .{ .token = p.tok_i },
});
- break :bit_range_value Node.PrefixOp.PtrInfo.Align.BitRange{
+ break :bit_range_value ast.PtrInfo.Align.BitRange{
.start = range_start,
.end = range_end,
};
@@ -2520,7 +2629,7 @@ const Parser = struct {
continue;
}
- ptr_info.align_info = Node.PrefixOp.PtrInfo.Align{
+ ptr_info.align_info = ast.PtrInfo.Align{
.node = expr_node,
.bit_range = bit_range,
};
@@ -2564,58 +2673,54 @@ const Parser = struct {
}
if (try p.parseArrayTypeStart()) |node| {
- switch (node.cast(Node.PrefixOp).?.op) {
- .ArrayType => {},
- .SliceType => |*slice_type| {
- // Collect pointer qualifiers in any order, but disallow duplicates
- while (true) {
- if (try p.parseByteAlign()) |align_expr| {
- if (slice_type.align_info != null) {
- try p.errors.append(p.gpa, .{
- .ExtraAlignQualifier = .{ .token = p.tok_i - 1 },
- });
- continue;
- }
- slice_type.align_info = Node.PrefixOp.PtrInfo.Align{
- .node = align_expr,
- .bit_range = null,
- };
+ if (node.cast(Node.SliceType)) |slice_type| {
+ // Collect pointer qualifiers in any order, but disallow duplicates
+ while (true) {
+ if (try p.parseByteAlign()) |align_expr| {
+ if (slice_type.ptr_info.align_info != null) {
+ try p.errors.append(p.gpa, .{
+ .ExtraAlignQualifier = .{ .token = p.tok_i - 1 },
+ });
continue;
}
- if (p.eatToken(.Keyword_const)) |const_token| {
- if (slice_type.const_token != null) {
- try p.errors.append(p.gpa, .{
- .ExtraConstQualifier = .{ .token = p.tok_i - 1 },
- });
- continue;
- }
- slice_type.const_token = const_token;
+ slice_type.ptr_info.align_info = ast.PtrInfo.Align{
+ .node = align_expr,
+ .bit_range = null,
+ };
+ continue;
+ }
+ if (p.eatToken(.Keyword_const)) |const_token| {
+ if (slice_type.ptr_info.const_token != null) {
+ try p.errors.append(p.gpa, .{
+ .ExtraConstQualifier = .{ .token = p.tok_i - 1 },
+ });
continue;
}
- if (p.eatToken(.Keyword_volatile)) |volatile_token| {
- if (slice_type.volatile_token != null) {
- try p.errors.append(p.gpa, .{
- .ExtraVolatileQualifier = .{ .token = p.tok_i - 1 },
- });
- continue;
- }
- slice_type.volatile_token = volatile_token;
+ slice_type.ptr_info.const_token = const_token;
+ continue;
+ }
+ if (p.eatToken(.Keyword_volatile)) |volatile_token| {
+ if (slice_type.ptr_info.volatile_token != null) {
+ try p.errors.append(p.gpa, .{
+ .ExtraVolatileQualifier = .{ .token = p.tok_i - 1 },
+ });
continue;
}
- if (p.eatToken(.Keyword_allowzero)) |allowzero_token| {
- if (slice_type.allowzero_token != null) {
- try p.errors.append(p.gpa, .{
- .ExtraAllowZeroQualifier = .{ .token = p.tok_i - 1 },
- });
- continue;
- }
- slice_type.allowzero_token = allowzero_token;
+ slice_type.ptr_info.volatile_token = volatile_token;
+ continue;
+ }
+ if (p.eatToken(.Keyword_allowzero)) |allowzero_token| {
+ if (slice_type.ptr_info.allowzero_token != null) {
+ try p.errors.append(p.gpa, .{
+ .ExtraAllowZeroQualifier = .{ .token = p.tok_i - 1 },
+ });
continue;
}
- break;
+ slice_type.ptr_info.allowzero_token = allowzero_token;
+ continue;
}
- },
- else => unreachable,
+ break;
+ }
}
return node;
}
@@ -2669,14 +2774,14 @@ const Parser = struct {
if (p.eatToken(.Period)) |period| {
if (try p.parseIdentifier()) |identifier| {
- // TODO: It's a bit weird to return an InfixOp from the SuffixOp parser.
+ // TODO: It's a bit weird to return a SimpleInfixOp from the SuffixOp parser.
// Should there be an Node.SuffixOp.FieldAccess variant? Or should
// this grammar rule be altered?
- const node = try p.arena.allocator.create(Node.InfixOp);
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
+ .base = Node{ .tag = .Period },
.op_token = period,
.lhs = undefined, // set by caller
- .op = .Period,
.rhs = identifier,
};
return &node.base;
@@ -2729,29 +2834,32 @@ const Parser = struct {
null;
const rbracket = try p.expectToken(.RBracket);
- const op: Node.PrefixOp.Op = if (expr) |len_expr|
- .{
- .ArrayType = .{
+ if (expr) |len_expr| {
+ if (sentinel) |s| {
+ const node = try p.arena.allocator.create(Node.ArrayTypeSentinel);
+ node.* = .{
+ .op_token = lbracket,
+ .rhs = undefined, // set by caller
.len_expr = len_expr,
- .sentinel = sentinel,
- },
+ .sentinel = s,
+ };
+ return &node.base;
+ } else {
+ const node = try p.arena.allocator.create(Node.ArrayType);
+ node.* = .{
+ .op_token = lbracket,
+ .rhs = undefined, // set by caller
+ .len_expr = len_expr,
+ };
+ return &node.base;
}
- else
- .{
- .SliceType = Node.PrefixOp.PtrInfo{
- .allowzero_token = null,
- .align_info = null,
- .const_token = null,
- .volatile_token = null,
- .sentinel = sentinel,
- },
- };
+ }
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.SliceType);
node.* = .{
.op_token = lbracket,
- .op = op,
.rhs = undefined, // set by caller
+ .ptr_info = .{ .sentinel = sentinel },
};
return &node.base;
}
@@ -2769,28 +2877,26 @@ const Parser = struct {
})
else
null;
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.PtrType);
node.* = .{
.op_token = asterisk,
- .op = .{ .PtrType = .{ .sentinel = sentinel } },
.rhs = undefined, // set by caller
+ .ptr_info = .{ .sentinel = sentinel },
};
return &node.base;
}
if (p.eatToken(.AsteriskAsterisk)) |double_asterisk| {
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.PtrType);
node.* = .{
.op_token = double_asterisk,
- .op = .{ .PtrType = .{} },
.rhs = undefined, // set by caller
};
// Special case for **, which is its own token
- const child = try p.arena.allocator.create(Node.PrefixOp);
+ const child = try p.arena.allocator.create(Node.PtrType);
child.* = .{
.op_token = double_asterisk,
- .op = .{ .PtrType = .{} },
.rhs = undefined, // set by caller
};
node.rhs = &child.base;
@@ -2809,10 +2915,9 @@ const Parser = struct {
p.putBackToken(ident);
} else {
_ = try p.expectToken(.RBracket);
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.PtrType);
node.* = .{
.op_token = lbracket,
- .op = .{ .PtrType = .{} },
.rhs = undefined, // set by caller
};
return &node.base;
@@ -2825,11 +2930,11 @@ const Parser = struct {
else
null;
_ = try p.expectToken(.RBracket);
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.PtrType);
node.* = .{
.op_token = lbracket,
- .op = .{ .PtrType = .{ .sentinel = sentinel } },
.rhs = undefined, // set by caller
+ .ptr_info = .{ .sentinel = sentinel },
};
return &node.base;
}
@@ -2956,7 +3061,7 @@ const Parser = struct {
const NodeParseFn = fn (p: *Parser) Error!?*Node;
- fn ListParseFn(comptime E: type, comptime nodeParseFn: var) ParseFn([]E) {
+ fn ListParseFn(comptime E: type, comptime nodeParseFn: anytype) ParseFn([]E) {
return struct {
pub fn parse(p: *Parser) ![]E {
var list = std.ArrayList(E).init(p.gpa);
@@ -2983,7 +3088,7 @@ const Parser = struct {
}.parse;
}
- fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.InfixOp.Op) NodeParseFn {
+ fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.Tag) NodeParseFn {
return struct {
pub fn parse(p: *Parser) Error!?*Node {
const op_token = if (token == .Keyword_and) switch (p.token_ids[p.tok_i]) {
@@ -2997,11 +3102,11 @@ const Parser = struct {
else => return null,
} else p.eatToken(token) orelse return null;
- const node = try p.arena.allocator.create(Node.InfixOp);
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
+ .base = .{ .tag = op },
.op_token = op_token,
.lhs = undefined, // set by caller
- .op = op,
.rhs = undefined, // set by caller
};
return &node.base;
@@ -3058,9 +3163,10 @@ const Parser = struct {
return &node.base;
}
- fn parseVarType(p: *Parser) !?*Node {
- const token = p.eatToken(.Keyword_var) orelse return null;
- const node = try p.arena.allocator.create(Node.VarType);
+ fn parseAnyType(p: *Parser) !?*Node {
+ const token = p.eatToken(.Keyword_anytype) orelse
+ p.eatToken(.Keyword_var) orelse return null; // TODO remove in next release cycle
+ const node = try p.arena.allocator.create(Node.AnyType);
node.* = .{
.token = token,
};
@@ -3070,7 +3176,6 @@ const Parser = struct {
fn createLiteral(p: *Parser, comptime T: type, token: TokenIndex) !*Node {
const result = try p.arena.allocator.create(T);
result.* = T{
- .base = Node{ .id = Node.typeToId(T) },
.token = token,
};
return &result.base;
@@ -3146,30 +3251,15 @@ const Parser = struct {
fn parseTry(p: *Parser) !?*Node {
const token = p.eatToken(.Keyword_try) orelse return null;
- const node = try p.arena.allocator.create(Node.PrefixOp);
+ const node = try p.arena.allocator.create(Node.SimplePrefixOp);
node.* = .{
+ .base = .{ .tag = .Try },
.op_token = token,
- .op = .Try,
.rhs = undefined, // set by caller
};
return &node.base;
}
- fn parseUse(p: *Parser) !?*Node {
- const token = p.eatToken(.Keyword_usingnamespace) orelse return null;
- const node = try p.arena.allocator.create(Node.Use);
- node.* = .{
- .doc_comments = null,
- .visib_token = null,
- .use_token = token,
- .expr = try p.expectNode(parseExpr, .{
- .ExpectedExpr = .{ .token = p.tok_i },
- }),
- .semicolon_token = try p.expectToken(.Semicolon),
- };
- return &node.base;
- }
-
/// IfPrefix Body (KEYWORD_else Payload? Body)?
fn parseIf(p: *Parser, bodyParseFn: NodeParseFn) !?*Node {
const node = (try p.parseIfPrefix()) orelse return null;
@@ -3223,20 +3313,53 @@ const Parser = struct {
}
/// Op* Child
- fn parsePrefixOpExpr(p: *Parser, opParseFn: NodeParseFn, childParseFn: NodeParseFn) Error!?*Node {
+ fn parsePrefixOpExpr(p: *Parser, comptime opParseFn: NodeParseFn, comptime childParseFn: NodeParseFn) Error!?*Node {
if (try opParseFn(p)) |first_op| {
var rightmost_op = first_op;
while (true) {
- switch (rightmost_op.id) {
- .PrefixOp => {
- var prefix_op = rightmost_op.cast(Node.PrefixOp).?;
+ switch (rightmost_op.tag) {
+ .AddressOf,
+ .Await,
+ .BitNot,
+ .BoolNot,
+ .OptionalType,
+ .Negation,
+ .NegationWrap,
+ .Resume,
+ .Try,
+ => {
+ if (try opParseFn(p)) |rhs| {
+ rightmost_op.cast(Node.SimplePrefixOp).?.rhs = rhs;
+ rightmost_op = rhs;
+ } else break;
+ },
+ .ArrayType => {
+ if (try opParseFn(p)) |rhs| {
+ rightmost_op.cast(Node.ArrayType).?.rhs = rhs;
+ rightmost_op = rhs;
+ } else break;
+ },
+ .ArrayTypeSentinel => {
+ if (try opParseFn(p)) |rhs| {
+ rightmost_op.cast(Node.ArrayTypeSentinel).?.rhs = rhs;
+ rightmost_op = rhs;
+ } else break;
+ },
+ .SliceType => {
+ if (try opParseFn(p)) |rhs| {
+ rightmost_op.cast(Node.SliceType).?.rhs = rhs;
+ rightmost_op = rhs;
+ } else break;
+ },
+ .PtrType => {
+ var ptr_type = rightmost_op.cast(Node.PtrType).?;
// If the token encountered was **, there will be two nodes
- if (p.token_ids[prefix_op.op_token] == .AsteriskAsterisk) {
- rightmost_op = prefix_op.rhs;
- prefix_op = rightmost_op.cast(Node.PrefixOp).?;
+ if (p.token_ids[ptr_type.op_token] == .AsteriskAsterisk) {
+ rightmost_op = ptr_type.rhs;
+ ptr_type = rightmost_op.cast(Node.PtrType).?;
}
if (try opParseFn(p)) |rhs| {
- prefix_op.rhs = rhs;
+ ptr_type.rhs = rhs;
rightmost_op = rhs;
} else break;
},
@@ -3252,9 +3375,42 @@ const Parser = struct {
}
// If any prefix op existed, a child node on the RHS is required
- switch (rightmost_op.id) {
- .PrefixOp => {
- const prefix_op = rightmost_op.cast(Node.PrefixOp).?;
+ switch (rightmost_op.tag) {
+ .AddressOf,
+ .Await,
+ .BitNot,
+ .BoolNot,
+ .OptionalType,
+ .Negation,
+ .NegationWrap,
+ .Resume,
+ .Try,
+ => {
+ const prefix_op = rightmost_op.cast(Node.SimplePrefixOp).?;
+ prefix_op.rhs = try p.expectNode(childParseFn, .{
+ .InvalidToken = .{ .token = p.tok_i },
+ });
+ },
+ .ArrayType => {
+ const prefix_op = rightmost_op.cast(Node.ArrayType).?;
+ prefix_op.rhs = try p.expectNode(childParseFn, .{
+ .InvalidToken = .{ .token = p.tok_i },
+ });
+ },
+ .ArrayTypeSentinel => {
+ const prefix_op = rightmost_op.cast(Node.ArrayTypeSentinel).?;
+ prefix_op.rhs = try p.expectNode(childParseFn, .{
+ .InvalidToken = .{ .token = p.tok_i },
+ });
+ },
+ .PtrType => {
+ const prefix_op = rightmost_op.cast(Node.PtrType).?;
+ prefix_op.rhs = try p.expectNode(childParseFn, .{
+ .InvalidToken = .{ .token = p.tok_i },
+ });
+ },
+ .SliceType => {
+ const prefix_op = rightmost_op.cast(Node.SliceType).?;
prefix_op.rhs = try p.expectNode(childParseFn, .{
.InvalidToken = .{ .token = p.tok_i },
});
@@ -3295,9 +3451,13 @@ const Parser = struct {
const left = res;
res = node;
- const op = node.cast(Node.InfixOp).?;
- op.*.lhs = left;
- op.*.rhs = right;
+ if (node.castTag(.Catch)) |op| {
+ op.lhs = left;
+ op.rhs = right;
+ } else if (node.cast(Node.SimpleInfixOp)) |op| {
+ op.lhs = left;
+ op.rhs = right;
+ }
switch (chain) {
.Once => break,
@@ -3308,12 +3468,12 @@ const Parser = struct {
return res;
}
- fn createInfixOp(p: *Parser, index: TokenIndex, op: Node.InfixOp.Op) !*Node {
- const node = try p.arena.allocator.create(Node.InfixOp);
+ fn createInfixOp(p: *Parser, op_token: TokenIndex, tag: Node.Tag) !*Node {
+ const node = try p.arena.allocator.create(Node.SimpleInfixOp);
node.* = .{
- .op_token = index,
+ .base = Node{ .tag = tag },
+ .op_token = op_token,
.lhs = undefined, // set by caller
- .op = op,
.rhs = undefined, // set by caller
};
return &node.base;
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index c8e7abd5cb..aa0a76c8a5 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1,4 +1,32 @@
-const builtin = @import("builtin");
+test "zig fmt: convert var to anytype" {
+ // TODO remove in next release cycle
+ try testTransform(
+ \\pub fn main(
+ \\ a: var,
+ \\ bar: var,
+ \\) void {}
+ ,
+ \\pub fn main(
+ \\ a: anytype,
+ \\ bar: anytype,
+ \\) void {}
+ \\
+ );
+}
+
+test "zig fmt: noasync to nosuspend" {
+ // TODO: remove this
+ try testTransform(
+ \\pub fn main() void {
+ \\ noasync call();
+ \\}
+ ,
+ \\pub fn main() void {
+ \\ nosuspend call();
+ \\}
+ \\
+ );
+}
test "recovery: top level" {
try testError(
@@ -422,10 +450,10 @@ test "zig fmt: asm expression with comptime content" {
);
}
-test "zig fmt: var struct field" {
+test "zig fmt: anytype struct field" {
try testCanonical(
\\pub const Pointer = struct {
- \\ sentinel: var,
+ \\ sentinel: anytype,
\\};
\\
);
@@ -1932,7 +1960,7 @@ test "zig fmt: preserve spacing" {
test "zig fmt: return types" {
try testCanonical(
\\pub fn main() !void {}
- \\pub fn main() var {}
+ \\pub fn main() anytype {}
\\pub fn main() i32 {}
\\
);
@@ -2140,9 +2168,9 @@ test "zig fmt: call expression" {
);
}
-test "zig fmt: var type" {
+test "zig fmt: anytype type" {
try testCanonical(
- \\fn print(args: var) var {}
+ \\fn print(args: anytype) anytype {}
\\
);
}
@@ -3146,20 +3174,6 @@ test "zig fmt: hexadeciaml float literals with underscore separators" {
);
}
-test "zig fmt: noasync to nosuspend" {
- // TODO: remove this
- try testTransform(
- \\pub fn main() void {
- \\ noasync call();
- \\}
- ,
- \\pub fn main() void {
- \\ nosuspend call();
- \\}
- \\
- );
-}
-
test "zig fmt: convert async fn into callconv(.Async)" {
try testTransform(
\\async fn foo() void {}
@@ -3180,6 +3194,13 @@ test "zig fmt: convert extern fn proto into callconv(.C)" {
);
}
+test "zig fmt: C var args" {
+ try testCanonical(
+ \\pub extern "c" fn printf(format: [*:0]const u8, ...) c_int;
+ \\
+ );
+}
+
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 1a30e46ee0..7f8a18299b 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -12,7 +12,7 @@ pub const Error = error{
};
/// Returns whether anything changed
-pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@TypeOf(stream).Error || Error)!bool {
+pub fn render(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree) (@TypeOf(stream).Error || Error)!bool {
// cannot render an invalid tree
std.debug.assert(tree.errors.len == 0);
@@ -64,7 +64,7 @@ pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@TypeOf(
fn renderRoot(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
) (@TypeOf(stream).Error || Error)!void {
// render all the line comments at the beginning of the file
@@ -191,13 +191,13 @@ fn renderRoot(
}
}
-fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @TypeOf(stream).Error!void {
+fn renderExtraNewline(tree: *ast.Tree, stream: anytype, start_col: *usize, node: *ast.Node) @TypeOf(stream).Error!void {
return renderExtraNewlineToken(tree, stream, start_col, node.firstToken());
}
fn renderExtraNewlineToken(
tree: *ast.Tree,
- stream: var,
+ stream: anytype,
start_col: *usize,
first_token: ast.TokenIndex,
) @TypeOf(stream).Error!void {
@@ -218,18 +218,18 @@ fn renderExtraNewlineToken(
}
}
-fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Error || Error)!void {
+fn renderTopLevelDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Error || Error)!void {
try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Newline);
}
-fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Error || Error)!void {
- switch (decl.id) {
+fn renderContainerDecl(allocator: *mem.Allocator, stream: anytype, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Error || Error)!void {
+ switch (decl.tag) {
.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
- try renderDocComments(tree, stream, fn_proto, indent, start_col);
+ try renderDocComments(tree, stream, fn_proto, fn_proto.getTrailer("doc_comments"), indent, start_col);
- if (fn_proto.body_node) |body_node| {
+ if (fn_proto.getTrailer("body_node")) |body_node| {
try renderExpression(allocator, stream, tree, indent, start_col, decl, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, body_node, space);
} else {
@@ -252,14 +252,14 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree,
.VarDecl => {
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);
- try renderDocComments(tree, stream, var_decl, indent, start_col);
+ try renderDocComments(tree, stream, var_decl, var_decl.getTrailer("doc_comments"), indent, start_col);
try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
},
.TestDecl => {
const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);
- try renderDocComments(tree, stream, test_decl, indent, start_col);
+ try renderDocComments(tree, stream, test_decl, test_decl.doc_comments, indent, start_col);
try renderToken(tree, stream, test_decl.test_token, indent, start_col, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, space);
@@ -268,7 +268,7 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree,
.ContainerField => {
const field = @fieldParentPtr(ast.Node.ContainerField, "base", decl);
- try renderDocComments(tree, stream, field, indent, start_col);
+ try renderDocComments(tree, stream, field, field.doc_comments, indent, start_col);
if (field.comptime_token) |t| {
try renderToken(tree, stream, t, indent, start_col, .Space); // comptime
}
@@ -358,14 +358,14 @@ fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree,
fn renderExpression(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
base: *ast.Node,
space: Space,
) (@TypeOf(stream).Error || Error)!void {
- switch (base.id) {
+ switch (base.tag) {
.Identifier => {
const identifier = @fieldParentPtr(ast.Node.Identifier, "base", base);
return renderToken(tree, stream, identifier.token, indent, start_col, space);
@@ -436,13 +436,10 @@ fn renderExpression(
}
},
- .InfixOp => {
- const infix_op_node = @fieldParentPtr(ast.Node.InfixOp, "base", base);
+ .Catch => {
+ const infix_op_node = @fieldParentPtr(ast.Node.Catch, "base", base);
- const op_space = switch (infix_op_node.op) {
- ast.Node.InfixOp.Op.Period, ast.Node.InfixOp.Op.ErrorUnion, ast.Node.InfixOp.Op.Range => Space.None,
- else => Space.Space,
- };
+ const op_space = Space.Space;
try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
const after_op_space = blk: {
@@ -458,182 +455,247 @@ fn renderExpression(
start_col.* = indent + indent_delta;
}
- switch (infix_op_node.op) {
- ast.Node.InfixOp.Op.Catch => |maybe_payload| if (maybe_payload) |payload| {
- try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
- },
- else => {},
+ if (infix_op_node.payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
}
return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space);
},
- .PrefixOp => {
- const prefix_op_node = @fieldParentPtr(ast.Node.PrefixOp, "base", base);
-
- switch (prefix_op_node.op) {
- .PtrType => |ptr_info| {
- const op_tok_id = tree.token_ids[prefix_op_node.op_token];
- switch (op_tok_id) {
- .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
- .LBracket => if (tree.token_ids[prefix_op_node.op_token + 2] == .Identifier)
- try stream.writeAll("[*c")
- else
- try stream.writeAll("[*"),
- else => unreachable,
- }
- if (ptr_info.sentinel) |sentinel| {
- const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
- const sentinel_space = switch (op_tok_id) {
- .LBracket => Space.None,
- else => Space.Space,
- };
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, sentinel_space);
- }
- switch (op_tok_id) {
- .Asterisk, .AsteriskAsterisk => {},
- .LBracket => try stream.writeByte(']'),
- else => unreachable,
- }
- if (ptr_info.allowzero_token) |allowzero_token| {
- try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
- }
- if (ptr_info.align_info) |align_info| {
- const lparen_token = tree.prevToken(align_info.node.firstToken());
- const align_token = tree.prevToken(lparen_token);
+ .Add,
+ .AddWrap,
+ .ArrayCat,
+ .ArrayMult,
+ .Assign,
+ .AssignBitAnd,
+ .AssignBitOr,
+ .AssignBitShiftLeft,
+ .AssignBitShiftRight,
+ .AssignBitXor,
+ .AssignDiv,
+ .AssignSub,
+ .AssignSubWrap,
+ .AssignMod,
+ .AssignAdd,
+ .AssignAddWrap,
+ .AssignMul,
+ .AssignMulWrap,
+ .BangEqual,
+ .BitAnd,
+ .BitOr,
+ .BitShiftLeft,
+ .BitShiftRight,
+ .BitXor,
+ .BoolAnd,
+ .BoolOr,
+ .Div,
+ .EqualEqual,
+ .ErrorUnion,
+ .GreaterOrEqual,
+ .GreaterThan,
+ .LessOrEqual,
+ .LessThan,
+ .MergeErrorSets,
+ .Mod,
+ .Mul,
+ .MulWrap,
+ .Period,
+ .Range,
+ .Sub,
+ .SubWrap,
+ .UnwrapOptional,
+ => {
+ const infix_op_node = @fieldParentPtr(ast.Node.SimpleInfixOp, "base", base);
+
+ const op_space = switch (base.tag) {
+ .Period, .ErrorUnion, .Range => Space.None,
+ else => Space.Space,
+ };
+ try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
- try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+ const after_op_space = blk: {
+ const loc = tree.tokenLocation(tree.token_locs[infix_op_node.op_token].end, tree.nextToken(infix_op_node.op_token));
+ break :blk if (loc.line == 0) op_space else Space.Newline;
+ };
- try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+ try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
+ if (after_op_space == Space.Newline and
+ tree.token_ids[tree.nextToken(infix_op_node.op_token)] != .MultilineStringLiteralLine)
+ {
+ try stream.writeByteNTimes(' ', indent + indent_delta);
+ start_col.* = indent + indent_delta;
+ }
- if (align_info.bit_range) |bit_range| {
- const colon1 = tree.prevToken(bit_range.start.firstToken());
- const colon2 = tree.prevToken(bit_range.end.firstToken());
+ return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space);
+ },
- try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
- try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+ .BitNot,
+ .BoolNot,
+ .Negation,
+ .NegationWrap,
+ .OptionalType,
+ .AddressOf,
+ => {
+ const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base);
+ try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.None);
+ return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space);
+ },
- const rparen_token = tree.nextToken(bit_range.end.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
- } else {
- const rparen_token = tree.nextToken(align_info.node.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
- }
- }
- if (ptr_info.const_token) |const_token| {
- try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const
- }
- if (ptr_info.volatile_token) |volatile_token| {
- try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
- }
- },
+ .Try,
+ .Resume,
+ .Await,
+ => {
+ const casted_node = @fieldParentPtr(ast.Node.SimplePrefixOp, "base", base);
+ try renderToken(tree, stream, casted_node.op_token, indent, start_col, Space.Space);
+ return renderExpression(allocator, stream, tree, indent, start_col, casted_node.rhs, space);
+ },
- .SliceType => |ptr_info| {
- try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // [
- if (ptr_info.sentinel) |sentinel| {
- const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
- try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), indent, start_col, Space.None); // ]
- } else {
- try renderToken(tree, stream, tree.nextToken(prefix_op_node.op_token), indent, start_col, Space.None); // ]
- }
+ .ArrayType => {
+ const array_type = @fieldParentPtr(ast.Node.ArrayType, "base", base);
+ return renderArrayType(
+ allocator,
+ stream,
+ tree,
+ indent,
+ start_col,
+ array_type.op_token,
+ array_type.rhs,
+ array_type.len_expr,
+ null,
+ space,
+ );
+ },
+ .ArrayTypeSentinel => {
+ const array_type = @fieldParentPtr(ast.Node.ArrayTypeSentinel, "base", base);
+ return renderArrayType(
+ allocator,
+ stream,
+ tree,
+ indent,
+ start_col,
+ array_type.op_token,
+ array_type.rhs,
+ array_type.len_expr,
+ array_type.sentinel,
+ space,
+ );
+ },
- if (ptr_info.allowzero_token) |allowzero_token| {
- try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
- }
- if (ptr_info.align_info) |align_info| {
- const lparen_token = tree.prevToken(align_info.node.firstToken());
- const align_token = tree.prevToken(lparen_token);
+ .PtrType => {
+ const ptr_type = @fieldParentPtr(ast.Node.PtrType, "base", base);
+ const op_tok_id = tree.token_ids[ptr_type.op_token];
+ switch (op_tok_id) {
+ .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
+ .LBracket => if (tree.token_ids[ptr_type.op_token + 2] == .Identifier)
+ try stream.writeAll("[*c")
+ else
+ try stream.writeAll("[*"),
+ else => unreachable,
+ }
+ if (ptr_type.ptr_info.sentinel) |sentinel| {
+ const colon_token = tree.prevToken(sentinel.firstToken());
+ try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
+ const sentinel_space = switch (op_tok_id) {
+ .LBracket => Space.None,
+ else => Space.Space,
+ };
+ try renderExpression(allocator, stream, tree, indent, start_col, sentinel, sentinel_space);
+ }
+ switch (op_tok_id) {
+ .Asterisk, .AsteriskAsterisk => {},
+ .LBracket => try stream.writeByte(']'),
+ else => unreachable,
+ }
+ if (ptr_type.ptr_info.allowzero_token) |allowzero_token| {
+ try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
+ }
+ if (ptr_type.ptr_info.align_info) |align_info| {
+ const lparen_token = tree.prevToken(align_info.node.firstToken());
+ const align_token = tree.prevToken(lparen_token);
- try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
- try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+ try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
- try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+ try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
- if (align_info.bit_range) |bit_range| {
- const colon1 = tree.prevToken(bit_range.start.firstToken());
- const colon2 = tree.prevToken(bit_range.end.firstToken());
+ if (align_info.bit_range) |bit_range| {
+ const colon1 = tree.prevToken(bit_range.start.firstToken());
+ const colon2 = tree.prevToken(bit_range.end.firstToken());
- try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
- try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+ try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
+ try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
- const rparen_token = tree.nextToken(bit_range.end.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
- } else {
- const rparen_token = tree.nextToken(align_info.node.lastToken());
- try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
- }
- }
- if (ptr_info.const_token) |const_token| {
- try renderToken(tree, stream, const_token, indent, start_col, Space.Space);
- }
- if (ptr_info.volatile_token) |volatile_token| {
- try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space);
- }
- },
+ const rparen_token = tree.nextToken(bit_range.end.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ } else {
+ const rparen_token = tree.nextToken(align_info.node.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ }
+ }
+ if (ptr_type.ptr_info.const_token) |const_token| {
+ try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const
+ }
+ if (ptr_type.ptr_info.volatile_token) |volatile_token| {
+ try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
+ }
+ return renderExpression(allocator, stream, tree, indent, start_col, ptr_type.rhs, space);
+ },
- .ArrayType => |array_info| {
- const lbracket = prefix_op_node.op_token;
- const rbracket = tree.nextToken(if (array_info.sentinel) |sentinel|
- sentinel.lastToken()
- else
- array_info.len_expr.lastToken());
+ .SliceType => {
+ const slice_type = @fieldParentPtr(ast.Node.SliceType, "base", base);
+ try renderToken(tree, stream, slice_type.op_token, indent, start_col, Space.None); // [
+ if (slice_type.ptr_info.sentinel) |sentinel| {
+ const colon_token = tree.prevToken(sentinel.firstToken());
+ try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
+ try renderToken(tree, stream, tree.nextToken(sentinel.lastToken()), indent, start_col, Space.None); // ]
+ } else {
+ try renderToken(tree, stream, tree.nextToken(slice_type.op_token), indent, start_col, Space.None); // ]
+ }
- try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
+ if (slice_type.ptr_info.allowzero_token) |allowzero_token| {
+ try renderToken(tree, stream, allowzero_token, indent, start_col, Space.Space); // allowzero
+ }
+ if (slice_type.ptr_info.align_info) |align_info| {
+ const lparen_token = tree.prevToken(align_info.node.firstToken());
+ const align_token = tree.prevToken(lparen_token);
- const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
- const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
- const new_indent = if (ends_with_comment) indent + indent_delta else indent;
- const new_space = if (ends_with_comment) Space.Newline else Space.None;
- try renderExpression(allocator, stream, tree, new_indent, start_col, array_info.len_expr, new_space);
- if (starts_with_comment) {
- try stream.writeByte('\n');
- }
- if (ends_with_comment or starts_with_comment) {
- try stream.writeByteNTimes(' ', indent);
- }
- if (array_info.sentinel) |sentinel| {
- const colon_token = tree.prevToken(sentinel.firstToken());
- try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
- try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
- }
- try renderToken(tree, stream, rbracket, indent, start_col, Space.None); // ]
- },
- .BitNot,
- .BoolNot,
- .Negation,
- .NegationWrap,
- .OptionalType,
- .AddressOf,
- => {
- try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None);
- },
+ try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
- .Try,
- .Resume,
- => {
- try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.Space);
- },
+ try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
- .Await => |await_info| {
- try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.Space);
- },
- }
+ if (align_info.bit_range) |bit_range| {
+ const colon1 = tree.prevToken(bit_range.start.firstToken());
+ const colon2 = tree.prevToken(bit_range.end.firstToken());
+
+ try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
+ try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
- return renderExpression(allocator, stream, tree, indent, start_col, prefix_op_node.rhs, space);
+ const rparen_token = tree.nextToken(bit_range.end.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ } else {
+ const rparen_token = tree.nextToken(align_info.node.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ }
+ }
+ if (slice_type.ptr_info.const_token) |const_token| {
+ try renderToken(tree, stream, const_token, indent, start_col, Space.Space);
+ }
+ if (slice_type.ptr_info.volatile_token) |volatile_token| {
+ try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space);
+ }
+ return renderExpression(allocator, stream, tree, indent, start_col, slice_type.rhs, space);
},
.ArrayInitializer, .ArrayInitializerDot => {
var rtoken: ast.TokenIndex = undefined;
var exprs: []*ast.Node = undefined;
- const lhs: union(enum) { dot: ast.TokenIndex, node: *ast.Node } = switch (base.id) {
+ const lhs: union(enum) { dot: ast.TokenIndex, node: *ast.Node } = switch (base.tag) {
.ArrayInitializerDot => blk: {
const casted = @fieldParentPtr(ast.Node.ArrayInitializerDot, "base", base);
rtoken = casted.rtoken;
@@ -767,14 +829,14 @@ fn renderExpression(
}
try renderExtraNewline(tree, stream, start_col, next_expr);
- if (next_expr.id != .MultilineStringLiteral) {
+ if (next_expr.tag != .MultilineStringLiteral) {
try stream.writeByteNTimes(' ', new_indent);
}
} else {
try renderExpression(allocator, stream, tree, new_indent, start_col, expr, Space.Comma); // ,
}
}
- if (exprs[exprs.len - 1].id != .MultilineStringLiteral) {
+ if (exprs[exprs.len - 1].tag != .MultilineStringLiteral) {
try stream.writeByteNTimes(' ', indent);
}
return renderToken(tree, stream, rtoken, indent, start_col, space);
@@ -797,7 +859,7 @@ fn renderExpression(
.StructInitializer, .StructInitializerDot => {
var rtoken: ast.TokenIndex = undefined;
var field_inits: []*ast.Node = undefined;
- const lhs: union(enum) { dot: ast.TokenIndex, node: *ast.Node } = switch (base.id) {
+ const lhs: union(enum) { dot: ast.TokenIndex, node: *ast.Node } = switch (base.tag) {
.StructInitializerDot => blk: {
const casted = @fieldParentPtr(ast.Node.StructInitializerDot, "base", base);
rtoken = casted.rtoken;
@@ -851,7 +913,7 @@ fn renderExpression(
if (field_inits.len == 1) blk: {
const field_init = field_inits[0].cast(ast.Node.FieldInitializer).?;
- switch (field_init.expr.id) {
+ switch (field_init.expr.tag) {
.StructInitializer,
.StructInitializerDot,
=> break :blk,
@@ -948,7 +1010,7 @@ fn renderExpression(
const params = call.params();
for (params) |param_node, i| {
- const param_node_new_indent = if (param_node.id == .MultilineStringLiteral) blk: {
+ const param_node_new_indent = if (param_node.tag == .MultilineStringLiteral) blk: {
break :blk indent;
} else blk: {
try stream.writeByteNTimes(' ', new_indent);
@@ -1179,9 +1241,15 @@ fn renderExpression(
const error_type = @fieldParentPtr(ast.Node.ErrorType, "base", base);
return renderToken(tree, stream, error_type.token, indent, start_col, space);
},
- .VarType => {
- const var_type = @fieldParentPtr(ast.Node.VarType, "base", base);
- return renderToken(tree, stream, var_type.token, indent, start_col, space);
+ .AnyType => {
+ const any_type = @fieldParentPtr(ast.Node.AnyType, "base", base);
+ if (mem.eql(u8, tree.tokenSlice(any_type.token), "var")) {
+ // TODO remove in next release cycle
+ try stream.writeAll("anytype");
+ if (space == .Comma) try stream.writeAll(",\n");
+ return;
+ }
+ return renderToken(tree, stream, any_type.token, indent, start_col, space);
},
.ContainerDecl => {
const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base);
@@ -1252,7 +1320,7 @@ fn renderExpression(
// declarations inside are fields
const src_has_only_fields = blk: {
for (fields_and_decls) |decl| {
- if (decl.id != .ContainerField) break :blk false;
+ if (decl.tag != .ContainerField) break :blk false;
}
break :blk true;
};
@@ -1377,7 +1445,7 @@ fn renderExpression(
.ErrorTag => {
const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base);
- try renderDocComments(tree, stream, tag, indent, start_col);
+ try renderDocComments(tree, stream, tag, tag.doc_comments, indent, start_col);
return renderToken(tree, stream, tag.name_token, indent, start_col, space); // name
},
@@ -1451,23 +1519,23 @@ fn renderExpression(
.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", base);
- if (fn_proto.visib_token) |visib_token_index| {
+ if (fn_proto.getTrailer("visib_token")) |visib_token_index| {
const visib_token = tree.token_ids[visib_token_index];
assert(visib_token == .Keyword_pub or visib_token == .Keyword_export);
try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub
}
- if (fn_proto.extern_export_inline_token) |extern_export_inline_token| {
- if (!fn_proto.is_extern_prototype)
+ if (fn_proto.getTrailer("extern_export_inline_token")) |extern_export_inline_token| {
+ if (fn_proto.getTrailer("is_extern_prototype") == null)
try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export/inline
}
- if (fn_proto.lib_name) |lib_name| {
+ if (fn_proto.getTrailer("lib_name")) |lib_name| {
try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space);
}
- const lparen = if (fn_proto.name_token) |name_token| blk: {
+ const lparen = if (fn_proto.getTrailer("name_token")) |name_token| blk: {
try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name
break :blk tree.nextToken(name_token);
@@ -1480,11 +1548,11 @@ fn renderExpression(
const rparen = tree.prevToken(
// the first token for the annotation expressions is the left
// parenthesis, hence the need for two prevToken
- if (fn_proto.align_expr) |align_expr|
+ if (fn_proto.getTrailer("align_expr")) |align_expr|
tree.prevToken(tree.prevToken(align_expr.firstToken()))
- else if (fn_proto.section_expr) |section_expr|
+ else if (fn_proto.getTrailer("section_expr")) |section_expr|
tree.prevToken(tree.prevToken(section_expr.firstToken()))
- else if (fn_proto.callconv_expr) |callconv_expr|
+ else if (fn_proto.getTrailer("callconv_expr")) |callconv_expr|
tree.prevToken(tree.prevToken(callconv_expr.firstToken()))
else switch (fn_proto.return_type) {
.Explicit => |node| node.firstToken(),
@@ -1505,11 +1573,14 @@ fn renderExpression(
for (fn_proto.params()) |param_decl, i| {
try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl, Space.None);
- if (i + 1 < fn_proto.params_len) {
+ if (i + 1 < fn_proto.params_len or fn_proto.getTrailer("var_args_token") != null) {
const comma = tree.nextToken(param_decl.lastToken());
try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
}
}
+ if (fn_proto.getTrailer("var_args_token")) |var_args_token| {
+ try renderToken(tree, stream, var_args_token, indent, start_col, Space.None);
+ }
} else {
// one param per line
const new_indent = indent + indent_delta;
@@ -1519,12 +1590,16 @@ fn renderExpression(
try stream.writeByteNTimes(' ', new_indent);
try renderParamDecl(allocator, stream, tree, new_indent, start_col, param_decl, Space.Comma);
}
+ if (fn_proto.getTrailer("var_args_token")) |var_args_token| {
+ try stream.writeByteNTimes(' ', new_indent);
+ try renderToken(tree, stream, var_args_token, new_indent, start_col, Space.Comma);
+ }
try stream.writeByteNTimes(' ', indent);
}
try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
- if (fn_proto.align_expr) |align_expr| {
+ if (fn_proto.getTrailer("align_expr")) |align_expr| {
const align_rparen = tree.nextToken(align_expr.lastToken());
const align_lparen = tree.prevToken(align_expr.firstToken());
const align_kw = tree.prevToken(align_lparen);
@@ -1535,7 +1610,7 @@ fn renderExpression(
try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // )
}
- if (fn_proto.section_expr) |section_expr| {
+ if (fn_proto.getTrailer("section_expr")) |section_expr| {
const section_rparen = tree.nextToken(section_expr.lastToken());
const section_lparen = tree.prevToken(section_expr.firstToken());
const section_kw = tree.prevToken(section_lparen);
@@ -1546,7 +1621,7 @@ fn renderExpression(
try renderToken(tree, stream, section_rparen, indent, start_col, Space.Space); // )
}
- if (fn_proto.callconv_expr) |callconv_expr| {
+ if (fn_proto.getTrailer("callconv_expr")) |callconv_expr| {
const callconv_rparen = tree.nextToken(callconv_expr.lastToken());
const callconv_lparen = tree.prevToken(callconv_expr.firstToken());
const callconv_kw = tree.prevToken(callconv_lparen);
@@ -1555,9 +1630,9 @@ fn renderExpression(
try renderToken(tree, stream, callconv_lparen, indent, start_col, Space.None); // (
try renderExpression(allocator, stream, tree, indent, start_col, callconv_expr, Space.None);
try renderToken(tree, stream, callconv_rparen, indent, start_col, Space.Space); // )
- } else if (fn_proto.is_extern_prototype) {
+ } else if (fn_proto.getTrailer("is_extern_prototype") != null) {
try stream.writeAll("callconv(.C) ");
- } else if (fn_proto.is_async) {
+ } else if (fn_proto.getTrailer("is_async") != null) {
try stream.writeAll("callconv(.Async) ");
}
@@ -1792,7 +1867,7 @@ fn renderExpression(
const rparen = tree.nextToken(for_node.array_expr.lastToken());
- const body_is_block = for_node.body.id == .Block;
+ const body_is_block = for_node.body.tag == .Block;
const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken());
const body_on_same_line = body_is_block or src_one_line_to_body;
@@ -1835,7 +1910,7 @@ fn renderExpression(
try renderExpression(allocator, stream, tree, indent, start_col, if_node.condition, Space.None); // condition
- const body_is_if_block = if_node.body.id == .If;
+ const body_is_if_block = if_node.body.tag == .If;
const body_is_block = nodeIsBlock(if_node.body);
if (body_is_if_block) {
@@ -1939,7 +2014,7 @@ fn renderExpression(
const indent_once = indent + indent_delta;
- if (asm_node.template.id == .MultilineStringLiteral) {
+ if (asm_node.template.tag == .MultilineStringLiteral) {
// After rendering a multiline string literal the cursor is
// already offset by indent
try stream.writeByteNTimes(' ', indent_delta);
@@ -2051,9 +2126,49 @@ fn renderExpression(
}
}
+fn renderArrayType(
+ allocator: *mem.Allocator,
+ stream: anytype,
+ tree: *ast.Tree,
+ indent: usize,
+ start_col: *usize,
+ lbracket: ast.TokenIndex,
+ rhs: *ast.Node,
+ len_expr: *ast.Node,
+ opt_sentinel: ?*ast.Node,
+ space: Space,
+) (@TypeOf(stream).Error || Error)!void {
+ const rbracket = tree.nextToken(if (opt_sentinel) |sentinel|
+ sentinel.lastToken()
+ else
+ len_expr.lastToken());
+
+ try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
+
+ const starts_with_comment = tree.token_ids[lbracket + 1] == .LineComment;
+ const ends_with_comment = tree.token_ids[rbracket - 1] == .LineComment;
+ const new_indent = if (ends_with_comment) indent + indent_delta else indent;
+ const new_space = if (ends_with_comment) Space.Newline else Space.None;
+ try renderExpression(allocator, stream, tree, new_indent, start_col, len_expr, new_space);
+ if (starts_with_comment) {
+ try stream.writeByte('\n');
+ }
+ if (ends_with_comment or starts_with_comment) {
+ try stream.writeByteNTimes(' ', indent);
+ }
+ if (opt_sentinel) |sentinel| {
+ const colon_token = tree.prevToken(sentinel.firstToken());
+ try renderToken(tree, stream, colon_token, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, sentinel, Space.None);
+ }
+ try renderToken(tree, stream, rbracket, indent, start_col, Space.None); // ]
+
+ return renderExpression(allocator, stream, tree, indent, start_col, rhs, space);
+}
+
fn renderAsmOutput(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
@@ -2081,7 +2196,7 @@ fn renderAsmOutput(
fn renderAsmInput(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
@@ -2099,70 +2214,75 @@ fn renderAsmInput(
fn renderVarDecl(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
var_decl: *ast.Node.VarDecl,
) (@TypeOf(stream).Error || Error)!void {
- if (var_decl.visib_token) |visib_token| {
+ if (var_decl.getTrailer("visib_token")) |visib_token| {
try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
}
- if (var_decl.extern_export_token) |extern_export_token| {
+ if (var_decl.getTrailer("extern_export_token")) |extern_export_token| {
try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern
- if (var_decl.lib_name) |lib_name| {
+ if (var_decl.getTrailer("lib_name")) |lib_name| {
try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib"
}
}
- if (var_decl.comptime_token) |comptime_token| {
+ if (var_decl.getTrailer("comptime_token")) |comptime_token| {
try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime
}
- if (var_decl.thread_local_token) |thread_local_token| {
+ if (var_decl.getTrailer("thread_local_token")) |thread_local_token| {
try renderToken(tree, stream, thread_local_token, indent, start_col, Space.Space); // threadlocal
}
try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var
- const name_space = if (var_decl.type_node == null and (var_decl.align_node != null or
- var_decl.section_node != null or var_decl.init_node != null)) Space.Space else Space.None;
+ const name_space = if (var_decl.getTrailer("type_node") == null and
+ (var_decl.getTrailer("align_node") != null or
+ var_decl.getTrailer("section_node") != null or
+ var_decl.getTrailer("init_node") != null))
+ Space.Space
+ else
+ Space.None;
try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space);
- if (var_decl.type_node) |type_node| {
+ if (var_decl.getTrailer("type_node")) |type_node| {
try renderToken(tree, stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space);
- const s = if (var_decl.align_node != null or
- var_decl.section_node != null or
- var_decl.init_node != null) Space.Space else Space.None;
+ const s = if (var_decl.getTrailer("align_node") != null or
+ var_decl.getTrailer("section_node") != null or
+ var_decl.getTrailer("init_node") != null) Space.Space else Space.None;
try renderExpression(allocator, stream, tree, indent, start_col, type_node, s);
}
- if (var_decl.align_node) |align_node| {
+ if (var_decl.getTrailer("align_node")) |align_node| {
const lparen = tree.prevToken(align_node.firstToken());
const align_kw = tree.prevToken(lparen);
const rparen = tree.nextToken(align_node.lastToken());
try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None);
- const s = if (var_decl.section_node != null or var_decl.init_node != null) Space.Space else Space.None;
+ const s = if (var_decl.getTrailer("section_node") != null or var_decl.getTrailer("init_node") != null) Space.Space else Space.None;
try renderToken(tree, stream, rparen, indent, start_col, s); // )
}
- if (var_decl.section_node) |section_node| {
+ if (var_decl.getTrailer("section_node")) |section_node| {
const lparen = tree.prevToken(section_node.firstToken());
const section_kw = tree.prevToken(lparen);
const rparen = tree.nextToken(section_node.lastToken());
try renderToken(tree, stream, section_kw, indent, start_col, Space.None); // linksection
try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
try renderExpression(allocator, stream, tree, indent, start_col, section_node, Space.None);
- const s = if (var_decl.init_node != null) Space.Space else Space.None;
+ const s = if (var_decl.getTrailer("init_node") != null) Space.Space else Space.None;
try renderToken(tree, stream, rparen, indent, start_col, s); // )
}
- if (var_decl.init_node) |init_node| {
- const s = if (init_node.id == .MultilineStringLiteral) Space.None else Space.Space;
- try renderToken(tree, stream, var_decl.eq_token.?, indent, start_col, s); // =
+ if (var_decl.getTrailer("init_node")) |init_node| {
+ const s = if (init_node.tag == .MultilineStringLiteral) Space.None else Space.Space;
+ try renderToken(tree, stream, var_decl.getTrailer("eq_token").?, indent, start_col, s); // =
try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None);
}
@@ -2171,14 +2291,14 @@ fn renderVarDecl(
fn renderParamDecl(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
param_decl: ast.Node.FnProto.ParamDecl,
space: Space,
) (@TypeOf(stream).Error || Error)!void {
- try renderDocComments(tree, stream, param_decl, indent, start_col);
+ try renderDocComments(tree, stream, param_decl, param_decl.doc_comments, indent, start_col);
if (param_decl.comptime_token) |comptime_token| {
try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space);
@@ -2191,20 +2311,19 @@ fn renderParamDecl(
try renderToken(tree, stream, tree.nextToken(name_token), indent, start_col, Space.Space); // :
}
switch (param_decl.param_type) {
- .var_args => |token| try renderToken(tree, stream, token, indent, start_col, space),
- .var_type, .type_expr => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, space),
+ .any_type, .type_expr => |node| try renderExpression(allocator, stream, tree, indent, start_col, node, space),
}
}
fn renderStatement(
allocator: *mem.Allocator,
- stream: var,
+ stream: anytype,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
base: *ast.Node,
) (@TypeOf(stream).Error || Error)!void {
- switch (base.id) {
+ switch (base.tag) {
.VarDecl => {
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
@@ -2236,7 +2355,7 @@ const Space = enum {
fn renderTokenOffset(
tree: *ast.Tree,
- stream: var,
+ stream: anytype,
token_index: ast.TokenIndex,
indent: usize,
start_col: *usize,
@@ -2434,7 +2553,7 @@ fn renderTokenOffset(
fn renderToken(
tree: *ast.Tree,
- stream: var,
+ stream: anytype,
token_index: ast.TokenIndex,
indent: usize,
start_col: *usize,
@@ -2445,18 +2564,19 @@ fn renderToken(
fn renderDocComments(
tree: *ast.Tree,
- stream: var,
- node: var,
+ stream: anytype,
+ node: anytype,
+ doc_comments: ?*ast.Node.DocComment,
indent: usize,
start_col: *usize,
) (@TypeOf(stream).Error || Error)!void {
- const comment = node.doc_comments orelse return;
+ const comment = doc_comments orelse return;
return renderDocCommentsToken(tree, stream, comment, node.firstToken(), indent, start_col);
}
fn renderDocCommentsToken(
tree: *ast.Tree,
- stream: var,
+ stream: anytype,
comment: *ast.Node.DocComment,
first_token: ast.TokenIndex,
indent: usize,
@@ -2482,7 +2602,7 @@ fn renderDocCommentsToken(
}
fn nodeIsBlock(base: *const ast.Node) bool {
- return switch (base.id) {
+ return switch (base.tag) {
.Block,
.If,
.For,
@@ -2494,10 +2614,52 @@ fn nodeIsBlock(base: *const ast.Node) bool {
}
fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
- const infix_op = base.cast(ast.Node.InfixOp) orelse return false;
- return switch (infix_op.op) {
- ast.Node.InfixOp.Op.Period => false,
- else => true,
+ return switch (base.tag) {
+ .Catch,
+ .Add,
+ .AddWrap,
+ .ArrayCat,
+ .ArrayMult,
+ .Assign,
+ .AssignBitAnd,
+ .AssignBitOr,
+ .AssignBitShiftLeft,
+ .AssignBitShiftRight,
+ .AssignBitXor,
+ .AssignDiv,
+ .AssignSub,
+ .AssignSubWrap,
+ .AssignMod,
+ .AssignAdd,
+ .AssignAddWrap,
+ .AssignMul,
+ .AssignMulWrap,
+ .BangEqual,
+ .BitAnd,
+ .BitOr,
+ .BitShiftLeft,
+ .BitShiftRight,
+ .BitXor,
+ .BoolAnd,
+ .BoolOr,
+ .Div,
+ .EqualEqual,
+ .ErrorUnion,
+ .GreaterOrEqual,
+ .GreaterThan,
+ .LessOrEqual,
+ .LessThan,
+ .MergeErrorSets,
+ .Mod,
+ .Mul,
+ .MulWrap,
+ .Range,
+ .Sub,
+ .SubWrap,
+ .UnwrapOptional,
+ => true,
+
+ else => false,
};
}
@@ -2532,7 +2694,7 @@ const FindByteOutStream = struct {
}
};
-fn copyFixingWhitespace(stream: var, slice: []const u8) @TypeOf(stream).Error!void {
+fn copyFixingWhitespace(stream: anytype, slice: []const u8) @TypeOf(stream).Error!void {
for (slice) |byte| switch (byte) {
'\t' => try stream.writeAll(" "),
'\r' => {},
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index cc6030ad15..f7ceee16ac 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -125,7 +125,7 @@ test "parse" {
}
/// Writes a Zig-syntax escaped string literal to the stream. Includes the double quotes.
-pub fn render(utf8: []const u8, out_stream: var) !void {
+pub fn render(utf8: []const u8, out_stream: anytype) !void {
try out_stream.writeByte('"');
for (utf8) |byte| switch (byte) {
'\n' => try out_stream.writeAll("\\n"),
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index 64c9401dbc..af494efbab 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -130,7 +130,7 @@ pub const NativePaths = struct {
return self.appendArray(&self.include_dirs, s);
}
- pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void {
+ pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
const item = try std.fmt.allocPrint0(self.include_dirs.allocator, fmt, args);
errdefer self.include_dirs.allocator.free(item);
try self.include_dirs.append(item);
@@ -140,7 +140,7 @@ pub const NativePaths = struct {
return self.appendArray(&self.lib_dirs, s);
}
- pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void {
+ pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
const item = try std.fmt.allocPrint0(self.lib_dirs.allocator, fmt, args);
errdefer self.lib_dirs.allocator.free(item);
try self.lib_dirs.append(item);
@@ -150,7 +150,7 @@ pub const NativePaths = struct {
return self.appendArray(&self.warnings, s);
}
- pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void {
+ pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
const item = try std.fmt.allocPrint0(self.warnings.allocator, fmt, args);
errdefer self.warnings.allocator.free(item);
try self.warnings.append(item);
@@ -161,7 +161,7 @@ pub const NativePaths = struct {
}
fn appendArray(self: *NativePaths, array: *ArrayList([:0]u8), s: []const u8) !void {
- const item = try std.mem.dupeZ(array.allocator, u8, s);
+ const item = try array.allocator.dupeZ(u8, s);
errdefer array.allocator.free(item);
try array.append(item);
}
@@ -859,6 +859,7 @@ pub const NativeTargetInfo = struct {
error.ConnectionTimedOut => return error.UnableToReadElfFile,
error.Unexpected => return error.Unexpected,
error.InputOutput => return error.FileSystem,
+ error.AccessDenied => return error.Unexpected,
};
if (len == 0) return error.UnexpectedEndOfFile;
i += len;
@@ -886,7 +887,7 @@ pub const NativeTargetInfo = struct {
abi: Target.Abi,
};
- pub fn elfInt(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) {
+ pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(@TypeOf(int_64), int_64);
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 3bf0d350cf..7f9c6f6288 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -15,6 +15,7 @@ pub const Token = struct {
.{ "allowzero", .Keyword_allowzero },
.{ "and", .Keyword_and },
.{ "anyframe", .Keyword_anyframe },
+ .{ "anytype", .Keyword_anytype },
.{ "asm", .Keyword_asm },
.{ "async", .Keyword_async },
.{ "await", .Keyword_await },
@@ -140,6 +141,8 @@ pub const Token = struct {
Keyword_align,
Keyword_allowzero,
Keyword_and,
+ Keyword_anyframe,
+ Keyword_anytype,
Keyword_asm,
Keyword_async,
Keyword_await,
@@ -168,7 +171,6 @@ pub const Token = struct {
Keyword_or,
Keyword_orelse,
Keyword_packed,
- Keyword_anyframe,
Keyword_pub,
Keyword_resume,
Keyword_return,
@@ -263,6 +265,7 @@ pub const Token = struct {
.Keyword_allowzero => "allowzero",
.Keyword_and => "and",
.Keyword_anyframe => "anyframe",
+ .Keyword_anytype => "anytype",
.Keyword_asm => "asm",
.Keyword_async => "async",
.Keyword_await => "await",