From f0b8791da75d2cb9c73424b8270da81e7d0ec333 Mon Sep 17 00:00:00 2001
From: Jonathan Marler
Date: Sat, 13 Jun 2020 11:35:18 -0600
Subject: ArrayList(u8) support writer interface

---
 lib/std/array_list.zig | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

(limited to 'lib/std/array_list.zig')

diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index e096a65491..d42a3c3d73 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -162,19 +162,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             mem.copy(T, self.items[oldlen..], items);
         }

-        /// Same as `append` except it returns the number of bytes written, which is always the same
-        /// as `m.len`. The purpose of this function existing is to match `std.io.OutStream` API.
-        /// This function may be called only when `T` is `u8`.
-        fn appendWrite(self: *Self, m: []const u8) !usize {
-            try self.appendSlice(m);
-            return m.len;
-        }
+        pub usingnamespace if (T != u8) struct { } else struct {
+            pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);

-        /// Initializes an OutStream which will append to the list.
-        /// This function may be called only when `T` is `u8`.
-        pub fn outStream(self: *Self) std.io.OutStream(*Self, error{OutOfMemory}, appendWrite) {
-            return .{ .context = self };
-        }
+            /// Initializes a Writer which will append to the list.
+            pub fn writer(self: *Self) Writer {
+                return .{ .context = self };
+            }
+
+            /// Deprecated: use `writer`
+            pub const outStream = writer;
+
+            /// Same as `append` except it returns the number of bytes written, which is always the same
+            /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
+            fn appendWrite(self: *Self, m: []const u8) !usize {
+                try self.appendSlice(m);
+                return m.len;
+            }
+        };

         /// Append a value to the list `n` times.
         /// Allocates more memory as necessary.
@@ -694,3 +699,15 @@ test "std.ArrayList.shrink still sets length on error.OutOfMemory" {
     list.shrink(1);
     testing.expect(list.items.len == 1);
 }
+
+test "std.ArrayList.writer" {
+    var list = ArrayList(u8).init(std.testing.allocator);
+    defer list.deinit();
+
+    const writer = list.writer();
+    try writer.writeAll("a");
+    try writer.writeAll("bc");
+    try writer.writeAll("d");
+    try writer.writeAll("efg");
+    testing.expectEqualSlices(u8, list.items, "abcdefg");
+}
--
cgit v1.2.3


From da549a72e19d4f24520ca151bbd4cd0e74dc752c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 20 Jun 2020 18:39:15 -0400
Subject: zig fmt

---
 lib/std/array_list.zig             |   2 +-
 lib/std/c/tokenizer.zig            | 100 ++++++++++++++++++-------------------
 lib/std/io/buffered_out_stream.zig |   2 +-
 lib/std/json.zig                   |   5 +-
 lib/std/os/windows/ws2_32.zig      |  18 +++----
 lib/std/zig/parse.zig              |   1 -
 6 files changed, 63 insertions(+), 65 deletions(-)

(limited to 'lib/std/array_list.zig')

diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index d42a3c3d73..452ebe2124 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -162,7 +162,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             mem.copy(T, self.items[oldlen..], items);
         }

-        pub usingnamespace if (T != u8) struct { } else struct {
+        pub usingnamespace if (T != u8) struct {} else struct {
             pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);

             /// Initializes a Writer which will append to the list.
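The `ArrayList(u8)` writer interface introduced in the first patch composes with anything that accepts a `std.io.Writer`, including `std.fmt` formatting. A minimal usage sketch (illustrative only, not part of either patch; the formatted values are made up):

    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    try list.writer().print("{} + {} = {}", .{ 1, 2, 3 });
    // list.items now holds the bytes "1 + 2 = 3".

Because `Writer` is declared only in the `T == u8` branch of the `usingnamespace`, calling `writer()` on a non-byte list is a compile error rather than a runtime failure.
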
diff --git a/lib/std/c/tokenizer.zig b/lib/std/c/tokenizer.zig index 198c69e2a7..f3fc8b8107 100644 --- a/lib/std/c/tokenizer.zig +++ b/lib/std/c/tokenizer.zig @@ -278,62 +278,62 @@ pub const Token = struct { // TODO extensions pub const keywords = std.ComptimeStringMap(Id, .{ - .{"auto", .Keyword_auto}, - .{"break", .Keyword_break}, - .{"case", .Keyword_case}, - .{"char", .Keyword_char}, - .{"const", .Keyword_const}, - .{"continue", .Keyword_continue}, - .{"default", .Keyword_default}, - .{"do", .Keyword_do}, - .{"double", .Keyword_double}, - .{"else", .Keyword_else}, - .{"enum", .Keyword_enum}, - .{"extern", .Keyword_extern}, - .{"float", .Keyword_float}, - .{"for", .Keyword_for}, - .{"goto", .Keyword_goto}, - .{"if", .Keyword_if}, - .{"int", .Keyword_int}, - .{"long", .Keyword_long}, - .{"register", .Keyword_register}, - .{"return", .Keyword_return}, - .{"short", .Keyword_short}, - .{"signed", .Keyword_signed}, - .{"sizeof", .Keyword_sizeof}, - .{"static", .Keyword_static}, - .{"struct", .Keyword_struct}, - .{"switch", .Keyword_switch}, - .{"typedef", .Keyword_typedef}, - .{"union", .Keyword_union}, - .{"unsigned", .Keyword_unsigned}, - .{"void", .Keyword_void}, - .{"volatile", .Keyword_volatile}, - .{"while", .Keyword_while}, + .{ "auto", .Keyword_auto }, + .{ "break", .Keyword_break }, + .{ "case", .Keyword_case }, + .{ "char", .Keyword_char }, + .{ "const", .Keyword_const }, + .{ "continue", .Keyword_continue }, + .{ "default", .Keyword_default }, + .{ "do", .Keyword_do }, + .{ "double", .Keyword_double }, + .{ "else", .Keyword_else }, + .{ "enum", .Keyword_enum }, + .{ "extern", .Keyword_extern }, + .{ "float", .Keyword_float }, + .{ "for", .Keyword_for }, + .{ "goto", .Keyword_goto }, + .{ "if", .Keyword_if }, + .{ "int", .Keyword_int }, + .{ "long", .Keyword_long }, + .{ "register", .Keyword_register }, + .{ "return", .Keyword_return }, + .{ "short", .Keyword_short }, + .{ "signed", .Keyword_signed }, + .{ "sizeof", .Keyword_sizeof }, + .{ "static", .Keyword_static }, + .{ "struct", .Keyword_struct }, + .{ "switch", .Keyword_switch }, + .{ "typedef", .Keyword_typedef }, + .{ "union", .Keyword_union }, + .{ "unsigned", .Keyword_unsigned }, + .{ "void", .Keyword_void }, + .{ "volatile", .Keyword_volatile }, + .{ "while", .Keyword_while }, // ISO C99 - .{"_Bool", .Keyword_bool}, - .{"_Complex", .Keyword_complex}, - .{"_Imaginary", .Keyword_imaginary}, - .{"inline", .Keyword_inline}, - .{"restrict", .Keyword_restrict}, + .{ "_Bool", .Keyword_bool }, + .{ "_Complex", .Keyword_complex }, + .{ "_Imaginary", .Keyword_imaginary }, + .{ "inline", .Keyword_inline }, + .{ "restrict", .Keyword_restrict }, // ISO C11 - .{"_Alignas", .Keyword_alignas}, - .{"_Alignof", .Keyword_alignof}, - .{"_Atomic", .Keyword_atomic}, - .{"_Generic", .Keyword_generic}, - .{"_Noreturn", .Keyword_noreturn}, - .{"_Static_assert", .Keyword_static_assert}, - .{"_Thread_local", .Keyword_thread_local}, + .{ "_Alignas", .Keyword_alignas }, + .{ "_Alignof", .Keyword_alignof }, + .{ "_Atomic", .Keyword_atomic }, + .{ "_Generic", .Keyword_generic }, + .{ "_Noreturn", .Keyword_noreturn }, + .{ "_Static_assert", .Keyword_static_assert }, + .{ "_Thread_local", .Keyword_thread_local }, // Preprocessor directives - .{"include", .Keyword_include}, - .{"define", .Keyword_define}, - .{"ifdef", .Keyword_ifdef}, - .{"ifndef", .Keyword_ifndef}, - .{"error", .Keyword_error}, - .{"pragma", .Keyword_pragma}, + .{ "include", .Keyword_include }, + .{ "define", .Keyword_define }, + .{ "ifdef", .Keyword_ifdef }, + .{ "ifndef", 
.Keyword_ifndef }, + .{ "error", .Keyword_error }, + .{ "pragma", .Keyword_pragma }, }); // TODO do this in the preprocessor diff --git a/lib/std/io/buffered_out_stream.zig b/lib/std/io/buffered_out_stream.zig index 6b8ede5489..6f9efa9575 100644 --- a/lib/std/io/buffered_out_stream.zig +++ b/lib/std/io/buffered_out_stream.zig @@ -2,4 +2,4 @@ pub const BufferedOutStream = @import("./buffered_writer.zig").BufferedWriter; /// Deprecated: use `std.io.buffered_writer.bufferedWriter` -pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter +pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter; diff --git a/lib/std/json.zig b/lib/std/json.zig index 4acdbc7d1a..ae10dc9559 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -2576,8 +2576,8 @@ pub fn stringify( }, .Array => return stringify(&value, options, out_stream), .Vector => |info| { - const array: [info.len]info.child = value; - return stringify(&array, options, out_stream); + const array: [info.len]info.child = value; + return stringify(&array, options, out_stream); }, else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"), } @@ -2770,4 +2770,3 @@ test "stringify struct with custom stringifier" { test "stringify vector" { try teststringify("[1,1]", @splat(2, @as(u32, 1)), StringifyOptions{}); } - diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig index 8b16f54361..1e36a72038 100644 --- a/lib/std/os/windows/ws2_32.zig +++ b/lib/std/os/windows/ws2_32.zig @@ -163,16 +163,16 @@ pub const IPPROTO_UDP = 17; pub const IPPROTO_ICMPV6 = 58; pub const IPPROTO_RM = 113; -pub const AI_PASSIVE = 0x00001; -pub const AI_CANONNAME = 0x00002; -pub const AI_NUMERICHOST = 0x00004; -pub const AI_NUMERICSERV = 0x00008; -pub const AI_ADDRCONFIG = 0x00400; -pub const AI_V4MAPPED = 0x00800; -pub const AI_NON_AUTHORITATIVE = 0x04000; -pub const AI_SECURE = 0x08000; +pub const AI_PASSIVE = 0x00001; +pub const AI_CANONNAME = 0x00002; +pub const AI_NUMERICHOST = 0x00004; +pub const AI_NUMERICSERV = 0x00008; +pub const AI_ADDRCONFIG = 0x00400; +pub const AI_V4MAPPED = 0x00800; +pub const AI_NON_AUTHORITATIVE = 0x04000; +pub const AI_SECURE = 0x08000; pub const AI_RETURN_PREFERRED_NAMES = 0x10000; -pub const AI_DISABLE_IDN_ENCODING = 0x80000; +pub const AI_DISABLE_IDN_ENCODING = 0x80000; pub const FIONBIO = -2147195266; diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index 2a3ff9d9de..e6cd7a8b6d 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -937,7 +937,6 @@ const Parser = struct { return node; } - while_prefix.body = try p.expectNode(parseAssignExpr, .{ .ExpectedBlockOrAssignment = .{ .token = p.tok_i }, }); -- cgit v1.2.3 From dc9648f868ed8ad08f040753767c03976bbcf3b7 Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Fri, 17 Apr 2020 14:15:36 -0600 Subject: new allocator interface --- lib/std/array_list.zig | 6 +- lib/std/c.zig | 11 + lib/std/heap.zig | 594 ++++++++++++++----------------- lib/std/heap/arena_allocator.zig | 29 +- lib/std/heap/logging_allocator.zig | 61 ++-- lib/std/mem.zig | 319 ++++++++++++++--- lib/std/os/windows/bits.zig | 1 + lib/std/testing.zig | 2 +- lib/std/testing/failing_allocator.zig | 41 +-- lib/std/testing/leak_count_allocator.zig | 21 +- 10 files changed, 624 insertions(+), 461 deletions(-) (limited to 'lib/std/array_list.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 452ebe2124..896e5bf9dc 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -219,7 +219,8 @@ pub fn 
ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { if (better_capacity >= new_capacity) break; } - const new_memory = try self.allocator.realloc(self.allocatedSlice(), better_capacity); + const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity); + assert(new_memory.len >= better_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } @@ -441,7 +442,8 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ if (better_capacity >= new_capacity) break; } - const new_memory = try allocator.realloc(self.allocatedSlice(), better_capacity); + const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity); + assert(new_memory.len >= better_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } diff --git a/lib/std/c.zig b/lib/std/c.zig index 97d6bf5215..57e6de65a3 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -232,6 +232,17 @@ pub extern "c" fn setuid(uid: c_uint) c_int; pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void; pub extern "c" fn malloc(usize) ?*c_void; + +pub usingnamespace switch (builtin.os.tag) { + .linux, .freebsd, .kfreebsd, .netbsd, .openbsd => struct { + pub extern "c" fn malloc_usable_size(?*const c_void) usize; + }, + .macosx, .ios, .watchos, .tvos => struct { + pub extern "c" fn malloc_size(?*const c_void) usize; + }, + else => struct {}, +}; + pub extern "c" fn realloc(?*c_void, usize) ?*c_void; pub extern "c" fn free(*c_void) void; pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index f05378c215..322c24934e 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -15,48 +15,88 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator; const Allocator = mem.Allocator; -pub const c_allocator = &c_allocator_state; +usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_size; +} else if (comptime @hasDecl(c, "malloc_usable_size")) struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_usable_size; +} else struct { + pub const supports_malloc_size = false; +}; + +pub const c_allocator = mem.getAllocatorPtr(&c_allocator_state); var c_allocator_state = Allocator{ - .reallocFn = cRealloc, - .shrinkFn = cShrink, + .allocFn = cAlloc, + .resizeFn = cResize, }; -fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - assert(new_align <= @alignOf(c_longdouble)); - const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr); - const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory; - return @ptrCast([*]u8, buf)[0..new_size]; +fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { + assert(ptr_align <= @alignOf(c_longdouble)); + const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory); + if (len_align == 0) { + return ptr[0..len]; + } + const full_len = init: { + if (comptime supports_malloc_size) { + const s = malloc_size(ptr); + assert(s >= len); + break :init s; + } + break :init len; + }; + return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)]; } -fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - const old_ptr = @ptrCast(*c_void, old_mem.ptr); - const buf = c.realloc(old_ptr, new_size) 
orelse return old_mem[0..new_size];
-    return @ptrCast([*]u8, buf)[0..new_size];
+fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+    if (new_len == 0) {
+        c.free(buf.ptr);
+        return 0;
+    }
+    if (new_len <= buf.len) {
+        return mem.alignAllocLen(buf.len, new_len, len_align);
+    }
+    if (comptime supports_malloc_size) {
+        const full_len = malloc_size(buf.ptr);
+        if (new_len <= full_len) {
+            return mem.alignAllocLen(full_len, new_len, len_align);
+        }
+    }
+    // TODO: could we still use realloc? are there any cases where we can guarantee that realloc won't move memory?
+    return error.OutOfMemory;
 }

 /// This allocator makes a syscall directly for every allocation and free.
 /// Thread-safe and lock-free.
 pub const page_allocator = if (std.Target.current.isWasm())
-    &wasm_page_allocator_state
+    mem.getAllocatorPtr(&wasm_page_allocator_state)
 else if (std.Target.current.os.tag == .freestanding)
     root.os.heap.page_allocator
 else
-    &page_allocator_state;
+    mem.getAllocatorPtr(&page_allocator_state);

 var page_allocator_state = Allocator{
-    .reallocFn = PageAllocator.realloc,
-    .shrinkFn = PageAllocator.shrink,
+    .allocFn = PageAllocator.alloc,
+    .resizeFn = PageAllocator.resize,
 };
 var wasm_page_allocator_state = Allocator{
-    .reallocFn = WasmPageAllocator.realloc,
-    .shrinkFn = WasmPageAllocator.shrink,
+    .allocFn = WasmPageAllocator.alloc,
+    .resizeFn = WasmPageAllocator.resize,
 };

 pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");

+/// Verifies that the adjusted length will still map to the full length
+pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
+    const aligned_len = mem.alignAllocLen(full_len, len, len_align);
+    assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
+    return aligned_len;
+}
+
 const PageAllocator = struct {
-    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
-        if (n == 0) return &[0]u8{};
+    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+        assert(n > 0);
+        const alignedLen = mem.alignForward(n, mem.page_size);

         if (builtin.os.tag == .windows) {
             const w = os.windows;
@@ -68,21 +108,21 @@ const PageAllocator = struct {
             // see https://devblogs.microsoft.com/oldnewthing/?p=42223
             const addr = w.VirtualAlloc(
                 null,
-                n,
+                alignedLen,
                 w.MEM_COMMIT | w.MEM_RESERVE,
                 w.PAGE_READWRITE,
             ) catch return error.OutOfMemory;

             // If the allocation is sufficiently aligned, use it.
             if (@ptrToInt(addr) & (alignment - 1) == 0) {
-                return @ptrCast([*]u8, addr)[0..n];
+                return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
             }

             // If it wasn't, actually do an explicitly aligned allocation.
             w.VirtualFree(addr, 0, w.MEM_RELEASE);
-            const alloc_size = n + alignment;
+            const alloc_size = n + alignment - mem.page_size;

-            const final_addr = while (true) {
+            while (true) {
                 // Reserve a range of memory large enough to find a sufficiently
                 // aligned address.
                 const reserved_addr = w.VirtualAlloc(
                     null,
                     alloc_size,
                     w.MEM_RESERVE,
                     w.PAGE_NOACCESS,
                 ) catch return error.OutOfMemory;
                 const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);

                 // Release the reserved pages (not actually used).
                 w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);

                 // At this point, it is possible that another thread has
                 // obtained some memory space that will cause the next
                 // VirtualAlloc call to fail. To handle this, we will retry
                 // until it succeeds.
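                 // Committing at the aligned address below can still fail if
                 // another thread claimed the range first; the `catch continue`
                 // restarts the reserve/release/commit sequence.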
const ptr = w.VirtualAlloc( @intToPtr(*c_void, aligned_addr), - n, + alignedLen, w.MEM_COMMIT | w.MEM_RESERVE, w.PAGE_READWRITE, ) catch continue; - return @ptrCast([*]u8, ptr)[0..n]; - }; - - return @ptrCast([*]u8, final_addr)[0..n]; + return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)]; + } } - const alloc_size = if (alignment <= mem.page_size) n else n + alignment; + const maxDropLen = alignment - std.math.min(alignment, mem.page_size); + const allocLen = if (maxDropLen <= alignedLen - n) alignedLen + else mem.alignForward(alignedLen + maxDropLen, mem.page_size); const slice = os.mmap( null, - mem.alignForward(alloc_size, mem.page_size), + allocLen, os.PROT_READ | os.PROT_WRITE, os.MAP_PRIVATE | os.MAP_ANONYMOUS, -1, 0, ) catch return error.OutOfMemory; - if (alloc_size == n) return slice[0..n]; + assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size)); const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment); // Unmap the extra bytes that were only requested in order to guarantee // that the range of memory we were provided had a proper alignment in // it somewhere. The extra bytes could be at the beginning, or end, or both. - const unused_start_len = aligned_addr - @ptrToInt(slice.ptr); - if (unused_start_len != 0) { - os.munmap(slice[0..unused_start_len]); + const dropLen = aligned_addr - @ptrToInt(slice.ptr); + if (dropLen != 0) { + os.munmap(slice[0..dropLen]); } - const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size); - const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr; - if (unused_end_len != 0) { - os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]); + + // Unmap extra pages + const alignedBufferLen = allocLen - dropLen; + if (alignedBufferLen > alignedLen) { + os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]); } - return @intToPtr([*]u8, aligned_addr)[0..n]; + return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)]; } - fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - const old_mem = @alignCast(mem.page_size, old_mem_unaligned); + fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize { + const new_size_aligned = mem.alignForward(new_size, mem.page_size); + if (builtin.os.tag == .windows) { const w = os.windows; if (new_size == 0) { @@ -153,100 +195,45 @@ const PageAllocator = struct { // is reserved in the initial allocation call to VirtualAlloc." // So we can only use MEM_RELEASE when actually releasing the // whole allocation. - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - } else { - const base_addr = @ptrToInt(old_mem.ptr); - const old_addr_end = base_addr + old_mem.len; - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (old_addr_end > new_addr_end_rounded) { + w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE); + return 0; + } + if (new_size < buf_unaligned.len) { + const base_addr = @ptrToInt(buf_unaligned.ptr); + const old_addr_end = base_addr + buf_unaligned.len; + const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); + if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
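                    // MEM_DECOMMIT returns the pages to the OS but keeps the
                    // address range reserved, so a partial shrink is allowed even
                    // though MEM_RELEASE may only free a whole allocation.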
w.VirtualFree( - @intToPtr(*c_void, new_addr_end_rounded), - old_addr_end - new_addr_end_rounded, + @intToPtr(*c_void, new_addr_end), + old_addr_end - new_addr_end, w.MEM_DECOMMIT, ); } + return alignPageAllocLen(new_size_aligned, new_size, len_align); } - return old_mem[0..new_size]; - } - const base_addr = @ptrToInt(old_mem.ptr); - const old_addr_end = base_addr + old_mem.len; - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (old_addr_end > new_addr_end_rounded) { - const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded); - os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]); - } - return old_mem[0..new_size]; - } - - fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - const old_mem = @alignCast(mem.page_size, old_mem_unaligned); - if (builtin.os.tag == .windows) { - if (old_mem.len == 0) { - return alloc(allocator, new_size, new_align); + if (new_size == buf_unaligned.len) { + return alignPageAllocLen(new_size_aligned, new_size, len_align); } + // new_size > buf_unaligned.len not implemented + return error.OutOfMemory; + } - if (new_size <= old_mem.len and new_align <= old_align) { - return shrink(allocator, old_mem, old_align, new_size, new_align); - } - - const w = os.windows; - const base_addr = @ptrToInt(old_mem.ptr); - - if (new_align > old_align and base_addr & (new_align - 1) != 0) { - // Current allocation doesn't satisfy the new alignment. - // For now we'll do a new one no matter what, but maybe - // there is something smarter to do instead. - const result = try alloc(allocator, new_size, new_align); - assert(old_mem.len != 0); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - - return result; - } - - const old_addr_end = base_addr + old_mem.len; - const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size); - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (new_addr_end_rounded == old_addr_end_rounded) { - // The reallocation fits in the already allocated pages. - return @ptrCast([*]u8, old_mem.ptr)[0..new_size]; - } - assert(new_addr_end_rounded > old_addr_end_rounded); - - // We need to commit new pages. - const additional_size = new_addr_end - old_addr_end_rounded; - const realloc_addr = w.kernel32.VirtualAlloc( - @intToPtr(*c_void, old_addr_end_rounded), - additional_size, - w.MEM_COMMIT | w.MEM_RESERVE, - w.PAGE_READWRITE, - ) orelse { - // Committing new pages at the end of the existing allocation - // failed, we need to try a new one. 
- const new_alloc_mem = try alloc(allocator, new_size, new_align); - @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len); - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - - return new_alloc_mem; - }; + const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + if (new_size_aligned == buf_aligned_len) + return alignPageAllocLen(new_size_aligned, new_size, len_align); - assert(@ptrToInt(realloc_addr) == old_addr_end_rounded); - return @ptrCast([*]u8, old_mem.ptr)[0..new_size]; - } - if (new_size <= old_mem.len and new_align <= old_align) { - return shrink(allocator, old_mem, old_align, new_size, new_align); - } - const result = try alloc(allocator, new_size, new_align); - if (old_mem.len != 0) { - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - os.munmap(old_mem); + if (new_size_aligned < buf_aligned_len) { + const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned); + os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]); + if (new_size_aligned == 0) + return 0; + return alignPageAllocLen(new_size_aligned, new_size, len_align); } - return result; + + // TODO: call mremap + return error.OutOfMemory; } }; @@ -338,16 +325,24 @@ const WasmPageAllocator = struct { } fn nPages(memsize: usize) usize { - return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size; + return mem.alignForward(memsize, mem.page_size) / mem.page_size; } - fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize { - var idx = conventional.useRecycled(page_count); - if (idx != FreeBlock.not_found) { - return idx; + fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 { + const page_count = nPages(len); + const page_idx = try allocPages(page_count); + return @intToPtr([*]u8, page_idx * mem.page_size) + [0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; + } + fn allocPages(page_count: usize) !usize { + { + const idx = conventional.useRecycled(page_count); + if (idx != FreeBlock.not_found) { + return idx; + } } - idx = extended.useRecycled(page_count); + const idx = extended.useRecycled(page_count); if (idx != FreeBlock.not_found) { return idx + extendedOffset(); } @@ -360,51 +355,36 @@ const WasmPageAllocator = struct { return @intCast(usize, prev_page_count); } - pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 { - if (new_align > std.mem.page_size) { - return error.OutOfMemory; + fn freePages(start: usize, end: usize) void { + if (start < extendedOffset()) { + conventional.recycle(start, std.math.min(extendedOffset(), end) - start); } - - if (nPages(new_size) == nPages(old_mem.len)) { - return old_mem.ptr[0..new_size]; - } else if (new_size < old_mem.len) { - return shrink(allocator, old_mem, old_align, new_size, new_align); - } else { - const page_idx = try alloc(allocator, nPages(new_size), new_align); - const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size]; - std.mem.copy(u8, new_mem, old_mem); - _ = shrink(allocator, old_mem, old_align, 0, 0); - return new_mem; + if (end > extendedOffset()) { + var new_end = end; + if (!extended.isInitialized()) { + // Steal the last page from the memory currently being recycled + // TODO: would it be better if we use the first page instead? + new_end -= 1; + + extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. 
mem.page_size / @sizeOf(u128)]; + // Since this is the first page being freed and we consume it, assume *nothing* is free. + mem.set(u128, extended.data, PageStatus.none_free); + } + const clamped_start = std.math.max(extendedOffset(), start); + extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start); } } - pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - @setCold(true); - const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size); - var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len); - - if (free_end > free_start) { - if (free_start < extendedOffset()) { - const clamped_end = std.math.min(extendedOffset(), free_end); - conventional.recycle(free_start, clamped_end - free_start); - } - - if (free_end > extendedOffset()) { - if (!extended.isInitialized()) { - // Steal the last page from the memory currently being recycled - // TODO: would it be better if we use the first page instead? - free_end -= 1; - - extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)]; - // Since this is the first page being freed and we consume it, assume *nothing* is free. - std.mem.set(u128, extended.data, PageStatus.none_free); - } - const clamped_start = std.math.max(extendedOffset(), free_start); - extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start); - } + fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize { + const aligned_len = mem.alignForward(buf.len, mem.page_size); + if (new_len > aligned_len) return error.OutOfMemory; + const current_n = nPages(aligned_len); + const new_n = nPages(new_len); + if (new_n != current_n) { + const base = nPages(@ptrToInt(buf.ptr)); + freePages(base + new_n, base + current_n); } - - return old_mem[0..new_size]; + return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align); } }; @@ -418,8 +398,8 @@ pub const HeapAllocator = switch (builtin.os.tag) { pub fn init() HeapAllocator { return HeapAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = resize, }, .heap_handle = null, }; @@ -431,11 +411,14 @@ pub const HeapAllocator = switch (builtin.os.tag) { } } - fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 { + fn getRecordPtr(buf: []u8) *align(1) usize { + return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len); + } + + fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 { const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); - if (n == 0) return &[0]u8{}; - const amt = n + alignment + @sizeOf(usize); + const amt = n + ptr_align - 1 + @sizeOf(usize); const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst); const heap_handle = optional_heap_handle orelse blk: { const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0; @@ -446,66 +429,60 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory; const root_addr = @ptrToInt(ptr); - const adjusted_addr = mem.alignForward(root_addr, alignment); - const record_addr = adjusted_addr + n; - @intToPtr(*align(1) usize, record_addr).* = root_addr; - return @intToPtr([*]u8, adjusted_addr)[0..n]; - } - - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: 
usize, new_align: u29) []u8 { - return realloc(allocator, old_mem, old_align, new_size, new_align) catch { - const old_adjusted_addr = @ptrToInt(old_mem.ptr); - const old_record_addr = old_adjusted_addr + old_mem.len; - const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; - const old_ptr = @intToPtr(*c_void, root_addr); - const new_record_addr = old_record_addr - new_size + old_mem.len; - @intToPtr(*align(1) usize, new_record_addr).* = root_addr; - return old_mem[0..new_size]; + const aligned_addr = mem.alignForward(root_addr, ptr_align); + const return_len = init: { + if (len_align == 0) break :init n; + const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr); + assert(full_len != std.math.maxInt(usize)); + assert(full_len >= amt); + break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr), len_align); }; + const buf = @intToPtr([*]u8, aligned_addr)[0..return_len]; + getRecordPtr(buf).* = root_addr; + return buf; } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - if (old_mem.len == 0) return alloc(allocator, new_size, new_align); - + fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); - const old_adjusted_addr = @ptrToInt(old_mem.ptr); - const old_record_addr = old_adjusted_addr + old_mem.len; - const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; - const old_ptr = @intToPtr(*c_void, root_addr); - if (new_size == 0) { - os.windows.HeapFree(self.heap_handle.?, 0, old_ptr); - return old_mem[0..0]; + os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void ,getRecordPtr(buf).*)); + return 0; } - const amt = new_size + new_align + @sizeOf(usize); + const root_addr = getRecordPtr(buf).*; + const align_offset = @ptrToInt(buf.ptr) - root_addr; + const amt = align_offset + new_size + @sizeOf(usize); const new_ptr = os.windows.kernel32.HeapReAlloc( self.heap_handle.?, - 0, - old_ptr, + os.windows.HEAP_REALLOC_IN_PLACE_ONLY, + @intToPtr(*c_void, root_addr), amt, ) orelse return error.OutOfMemory; - const offset = old_adjusted_addr - root_addr; - const new_root_addr = @ptrToInt(new_ptr); - var new_adjusted_addr = new_root_addr + offset; - const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt; - const offset_is_aligned = new_adjusted_addr % new_align == 0; - if (!offset_is_valid or !offset_is_aligned) { - // If HeapReAlloc didn't happen to move the memory to the new alignment, - // or the memory starting at the old offset would be outside of the new allocation, - // then we need to copy the memory to a valid aligned address and use that - const new_aligned_addr = mem.alignForward(new_root_addr, new_align); - @memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size)); - new_adjusted_addr = new_aligned_addr; - } - const new_record_addr = new_adjusted_addr + new_size; - @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr; - return @intToPtr([*]u8, new_adjusted_addr)[0..new_size]; + assert(new_ptr == @intToPtr(*c_void, root_addr)); + const return_len = init: { + if (len_align == 0) break :init new_size; + const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr); + assert(full_len != std.math.maxInt(usize)); + assert(full_len >= amt); + break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align); + }; + 
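+        // The allocation's root address lives in a usize footer directly after
+        // the user slice (see getRecordPtr); store it again for the resized length.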
getRecordPtr(buf.ptr[0..return_len]).* = root_addr; + return return_len; } }, else => @compileError("Unsupported OS"), }; +fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool { + return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and + @ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len); +} + +fn sliceContainsSlice(container: []u8, slice: []u8) bool { + return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and + (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len); +} + pub const FixedBufferAllocator = struct { allocator: Allocator, end_index: usize, @@ -514,19 +491,33 @@ pub const FixedBufferAllocator = struct { pub fn init(buffer: []u8) FixedBufferAllocator { return FixedBufferAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = resize, }, .buffer = buffer, .end_index = 0, }; } - fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { + pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { + return sliceContainsPtr(self.buffer, ptr); + } + + pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool { + return sliceContainsSlice(self.buffer, slice); + } + + // NOTE: this will not work in all cases, if the last allocation had an adjusted_index + // then we won't be able to determine what the last allocation was. This is because + // the alignForward operation done in alloc is not reverisible. + pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { + return buf.ptr + buf.len == self.buffer.ptr + self.end_index; + } + + fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 { const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); - const addr = @ptrToInt(self.buffer.ptr) + self.end_index; - const adjusted_addr = mem.alignForward(addr, alignment); - const adjusted_index = self.end_index + (adjusted_addr - addr); + const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align); + const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr); const new_end_index = adjusted_index + n; if (new_end_index > self.buffer.len) { return error.OutOfMemory; @@ -534,33 +525,32 @@ pub const FixedBufferAllocator = struct { const result = self.buffer[adjusted_index..new_end_index]; self.end_index = new_end_index; - return result; + return result[0..mem.alignAllocLen(result.len, n, len_align)]; } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize { const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); - assert(old_mem.len <= self.end_index); - if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and - mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr)) - { - const start_index = self.end_index - old_mem.len; - const new_end_index = start_index + new_size; - if (new_end_index > self.buffer.len) return error.OutOfMemory; - const result = self.buffer[start_index..new_end_index]; - self.end_index = new_end_index; - return result; - } else if (new_size <= old_mem.len and new_align <= old_align) { - // We can't do anything with the memory, so tell the client to keep it. 
- return error.OutOfMemory; - } else { - const result = try alloc(allocator, new_size, new_align); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - return result; + assert(self.ownsSlice(buf)); // sanity check + + if (!self.isLastAllocation(buf)) { + if (new_size > buf.len) + return error.OutOfMemory; + return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align); } - } - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - return old_mem[0..new_size]; + if (new_size <= buf.len) { + const sub = buf.len - new_size; + self.end_index -= sub; + return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align); + } + + var add = new_size - buf.len; + if (add + self.end_index > self.buffer.len) { + //add = self.buffer.len - self.end_index; + return error.OutOfMemory; + } + self.end_index += add; + return mem.alignAllocLen(buf.len + add, new_size, len_align); } pub fn reset(self: *FixedBufferAllocator) void { @@ -581,20 +571,20 @@ pub const ThreadSafeFixedBufferAllocator = blk: { pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator { return ThreadSafeFixedBufferAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = Allocator.noResize, }, .buffer = buffer, .end_index = 0, }; } - fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 { const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator); var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst); while (true) { const addr = @ptrToInt(self.buffer.ptr) + end_index; - const adjusted_addr = mem.alignForward(addr, alignment); + const adjusted_addr = mem.alignForward(addr, ptr_align); const adjusted_index = end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; if (new_end_index > self.buffer.len) { @@ -604,21 +594,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: { } } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - if (new_size <= old_mem.len and new_align <= old_align) { - // We can't do anything useful with the memory, tell the client to keep it. 
- return error.OutOfMemory; - } else { - const result = try alloc(allocator, new_size, new_align); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - return result; - } - } - - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - return old_mem[0..new_size]; - } - pub fn reset(self: *ThreadSafeFixedBufferAllocator) void { self.end_index = 0; } @@ -632,8 +607,8 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack .fallback_allocator = fallback_allocator, .fixed_buffer_allocator = undefined, .allocator = Allocator{ - .reallocFn = StackFallbackAllocator(size).realloc, - .shrinkFn = StackFallbackAllocator(size).shrink, + .allocFn = StackFallbackAllocator(size).realloc, + .resizeFn = StackFallbackAllocator(size).resize, }, }; } @@ -652,58 +627,19 @@ pub fn StackFallbackAllocator(comptime size: usize) type { return &self.allocator; } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 { const self = @fieldParentPtr(Self, "allocator", allocator); - const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and - @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len; - if (in_buffer) { - return FixedBufferAllocator.realloc( - &self.fixed_buffer_allocator.allocator, - old_mem, - old_align, - new_size, - new_align, - ) catch { - const result = try self.fallback_allocator.reallocFn( - self.fallback_allocator, - &[0]u8{}, - undefined, - new_size, - new_align, - ); - mem.copy(u8, result, old_mem); - return result; - }; - } - return self.fallback_allocator.reallocFn( - self.fallback_allocator, - old_mem, - old_align, - new_size, - new_align, - ); + return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch + return fallback_allocator.alloc(len, ptr_align); } - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void { const self = @fieldParentPtr(Self, "allocator", allocator); - const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and - @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len; - if (in_buffer) { - return FixedBufferAllocator.shrink( - &self.fixed_buffer_allocator.allocator, - old_mem, - old_align, - new_size, - new_align, - ); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + try self.fixed_buffer_allocator.callResizeFn(buf, new_len); + } else { + try self.fallback_allocator.callResizeFn(buf, new_len); } - return self.fallback_allocator.shrinkFn( - self.fallback_allocator, - old_mem, - old_align, - new_size, - new_align, - ); } }; } @@ -718,8 +654,8 @@ test "c_allocator" { test "WasmPageAllocator internals" { if (comptime std.Target.current.isWasm()) { - const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size; - const initial = try page_allocator.alloc(u8, std.mem.page_size); + const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size; + const initial = try page_allocator.alloc(u8, mem.page_size); std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite. 
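    // Shrinking a fresh page-sized allocation should happen in place; the
    // page allocator keeps the same base address for the smaller length.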
var inplace = try page_allocator.realloc(initial, 1); @@ -799,7 +735,7 @@ test "ArenaAllocator" { var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; test "FixedBufferAllocator" { - var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); + var fixed_buffer_allocator = mem.sanityWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); try testAllocator(&fixed_buffer_allocator.allocator); try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16); @@ -865,7 +801,10 @@ test "ThreadSafeFixedBufferAllocator" { try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator); } -fn testAllocator(allocator: *mem.Allocator) !void { +fn testAllocator(base_allocator: *mem.Allocator) !void { + var sanityAllocator = mem.sanityWrap(base_allocator); + const allocator = &sanityAllocator.allocator; + var slice = try allocator.alloc(*i32, 100); testing.expect(slice.len == 100); for (slice) |*item, i| { @@ -893,7 +832,10 @@ fn testAllocator(allocator: *mem.Allocator) !void { allocator.free(slice); } -fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void { +fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void { + var sanityAllocator = mem.sanityWrap(base_allocator); + const allocator = &sanityAllocator.allocator; + // initial var slice = try allocator.alignedAlloc(u8, alignment, 10); testing.expect(slice.len == 10); @@ -917,7 +859,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi testing.expect(slice.len == 0); } -fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void { +fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void { + var sanityAllocator = mem.sanityWrap(base_allocator); + const allocator = &sanityAllocator.allocator; + //Maybe a platform's page_size is actually the same as or // very near usize? 
if (mem.page_size << 2 > maxInt(usize)) return; @@ -946,7 +891,10 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo allocator.free(slice); } -fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void { +fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void { + var sanityAllocator = mem.sanityWrap(base_allocator); + const allocator = &sanityAllocator.allocator; + var debug_buffer: [1000]u8 = undefined; const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator; diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index b41399772a..ee3bfc19d4 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -20,8 +20,8 @@ pub const ArenaAllocator = struct { pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator { return .{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = Allocator.noResize, }, .child_allocator = child_allocator, .state = self, @@ -61,38 +61,23 @@ pub const ArenaAllocator = struct { return buf_node; } - fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 { const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator); - var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment); + var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align); while (true) { const cur_buf = cur_node.data[@sizeOf(BufNode)..]; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(addr, alignment); + const adjusted_addr = mem.alignForward(addr, ptr_align); const adjusted_index = self.state.end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; if (new_end_index > cur_buf.len) { - cur_node = try self.createNode(cur_buf.len, n + alignment); + cur_node = try self.createNode(cur_buf.len, n + ptr_align); continue; } const result = cur_buf[adjusted_index..new_end_index]; self.state.end_index = new_end_index; - return result; + return result[0..mem.alignAllocLen(result.len, n, len_align)]; } } - - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - if (new_size <= old_mem.len and new_align <= new_size) { - // We can't do anything with the memory, so tell the client to keep it. 
- return error.OutOfMemory; - } else { - const result = try alloc(allocator, new_size, new_align); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - return result; - } - } - - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - return old_mem[0..new_size]; - } }; diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 0d15986a76..e465795b8f 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -15,39 +15,45 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { pub fn init(parent_allocator: *Allocator, out_stream: OutStreamType) Self { return Self{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = resize, }, .parent_allocator = parent_allocator, .out_stream = out_stream, }; } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 { const self = @fieldParentPtr(Self, "allocator", allocator); - if (old_mem.len == 0) { - self.out_stream.print("allocation of {} ", .{new_size}) catch {}; - } else { - self.out_stream.print("resize from {} to {} ", .{ old_mem.len, new_size }) catch {}; - } - const result = self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align); + self.out_stream.print("alloc : {}", .{len}) catch {}; + const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align); if (result) |buff| { - self.out_stream.print("success!\n", .{}) catch {}; + self.out_stream.print(" success!\n", .{}) catch {}; } else |err| { - self.out_stream.print("failure!\n", .{}) catch {}; + self.out_stream.print(" failure!\n", .{}) catch {}; } return result; } - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(Self, "allocator", allocator); - const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align); - if (new_size == 0) { - self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {}; + if (new_len == 0) { + self.out_stream.print("free : {}\n", .{buf.len}) catch {}; + } else if (new_len <= buf.len) { + self.out_stream.print("shrink: {} to {}\n", .{buf.len, new_len}) catch {}; } else { - self.out_stream.print("shrink from {} bytes to {} bytes success!\n", .{ old_mem.len, new_size }) catch {}; + self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; + } + if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| { + if (new_len > buf.len) { + self.out_stream.print(" success!\n", .{}) catch {}; + } + return resized_len; + } else |e| { + std.debug.assert(new_len > buf.len); + self.out_stream.print(" failure!\n", .{}) catch {}; + return e; } - return result; } }; } @@ -60,17 +66,24 @@ pub fn loggingAllocator( } test "LoggingAllocator" { - var buf: [255]u8 = undefined; - var fbs = std.io.fixedBufferStream(&buf); + var log_buf: [255]u8 = undefined; + var fbs = std.io.fixedBufferStream(&log_buf); - const allocator = &loggingAllocator(std.testing.allocator, fbs.outStream()).allocator; + var allocator_buf: [10]u8 = undefined; + var fixedBufferAllocator = 
std.mem.sanityWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); + const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator; - const ptr = try allocator.alloc(u8, 10); - allocator.free(ptr); + var a = try allocator.alloc(u8, 10); + a.len = allocator.shrinkBytes(a, 5, 0); + std.debug.assert(a.len == 5); + std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0)); + allocator.free(a); std.testing.expectEqualSlices(u8, - \\allocation of 10 success! - \\free of 10 bytes success! + \\alloc : 10 success! + \\shrink: 10 to 5 + \\expand: 5 to 20 failure! + \\free : 5 \\ , fbs.getWritten()); } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index b942fd3bf4..6a7e846f62 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -16,6 +16,52 @@ pub const page_size = switch (builtin.arch) { pub const Allocator = struct { pub const Error = error{OutOfMemory}; + /// Attempt to allocate at least `len` bytes aligned to `ptr_align`. + /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. + /// + /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8, + + /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent + /// length returned by `allocFn` or `resizeFn`. + /// + /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no + /// longer be passed to `resizeFn`. + /// + /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. + /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be + /// unmodified and error.OutOfMemory MUST be returned. + /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. + /// + /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize, + + pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 { + return self.allocFn(self, new_len, alignment, len_align); + } + + pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize { + return self.resizeFn(self, buf, new_len, len_align); + } + + /// Set to resizeFn if in-place resize is not supported. + pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize { + if (new_len > buf.len) + return error.OutOfMemory; + return new_len; + } + + /// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning + /// error.OutOfMemory should be impossible. + pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize { + assert(new_len <= buf.len); + return self.callResizeFn(buf, new_len, len_align) catch unreachable; + } + /// Realloc is used to modify the size or alignment of an existing allocation, /// as well as to provide the allocator with an opportunity to move an allocation /// to a better location. @@ -24,7 +70,7 @@ pub const Allocator = struct { /// When the size/alignment is less than or equal to the previous allocation, /// this function returns `error.OutOfMemory` when the allocator decides the client /// would be better off keeping the extra alignment/size. 
Clients will call - /// `shrinkFn` when they require the allocator to track a new alignment/size, + /// `callResizeFn` when they require the allocator to track a new alignment/size, /// and so this function should only return success when the allocator considers /// the reallocation desirable from the allocator's perspective. /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle @@ -37,16 +83,15 @@ pub const Allocator = struct { /// as `old_mem` was when `reallocFn` is called. The bytes of /// `return_value[old_mem.len..]` have undefined values. /// The returned slice must have its pointer aligned at least to `new_alignment` bytes. - reallocFn: fn ( + fn reallocBytes( self: *Allocator, /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// `allocFn` or `resizeFn`. /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` /// is guaranteed to be >= 1. old_mem: []u8, /// If `old_mem.len == 0` then this is `undefined`, otherwise: - /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// Guaranteed to be the same as what was passed to `allocFn`. /// Guaranteed to be >= 1. /// Guaranteed to be a power of 2. old_alignment: u29, @@ -57,23 +102,49 @@ pub const Allocator = struct { /// Guaranteed to be a power of 2. /// Returned slice's pointer must have this alignment. new_alignment: u29, - ) Error![]u8, + /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly + /// non-zero means the length of the returned slice must be aligned by `len_align` + /// `new_len` must be aligned by `len_align` + len_align: u29, + ) Error![]u8 { + if (old_mem.len == 0) { + const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align); + @memset(new_mem.ptr, undefined, new_byte_count); + return new_mem; + } - /// This function deallocates memory. It must succeed. - shrinkFn: fn ( - self: *Allocator, - /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. - old_mem: []u8, - /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. - old_alignment: u29, - /// Guaranteed to be less than or equal to `old_mem.len`. - new_byte_count: usize, - /// If `new_byte_count == 0` then this is `undefined`, otherwise: - /// Guaranteed to be less than or equal to `old_alignment`. - new_alignment: u29, - ) []u8, + if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { + if (new_byte_count <= old_mem.len) { + const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align); + if (shrunk_len < old_mem.len) { + @memset(old_mem.ptr + shrunk_len, undefined, old_mem.len - shrunk_len); + } + return old_mem.ptr[0..shrunk_len]; + } + if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| { + assert(resized_len >= new_byte_count); + @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); + return old_mem.ptr[0..resized_len]; + } else |_| { } + } + if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) { + return error.OutOfMemory; + } + return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align); + } + + /// Move the given memory to a new location in the given allocator to accomodate a new + /// size and alignment. 
+ fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 { + assert(old_mem.len > 0); + assert(new_len > 0); + const new_mem = try self.callAllocFn(new_len, new_alignment, len_align); + @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len)); + // DISABLED TO AVOID BUGS IN TRANSLATE C + //@memset(old_mem.ptr, undefined, old_mem.len); + _ = self.shrinkBytes(old_mem, 0, 0); + return new_mem; + } /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. @@ -89,8 +160,7 @@ pub const Allocator = struct { const T = @TypeOf(ptr).Child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); - const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1); - assert(shrink_result.len == 0); + _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0); } /// Allocates an array of `n` items of type `T` and sets all the @@ -150,9 +220,21 @@ pub const Allocator = struct { /// null means naturally aligned comptime alignment: ?u29, n: usize, + ) Error![]align(alignment orelse @alignOf(T)) T { + return self.alignedAlloc2(T, alignment, n, .exact); + } + + const Exact = enum {exact,atLeast}; + pub fn alignedAlloc2( + self: *Allocator, + comptime T: type, + /// null means naturally aligned + comptime alignment: ?u29, + n: usize, + exact: Exact, ) Error![]align(alignment orelse @alignOf(T)) T { const a = if (alignment) |a| blk: { - if (a == @alignOf(T)) return alignedAlloc(self, T, null, n); + if (a == @alignOf(T)) return alignedAlloc2(self, T, null, n, exact); break :blk a; } else @alignOf(T); @@ -161,15 +243,16 @@ pub const Allocator = struct { } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; - const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a); - assert(byte_slice.len == byte_count); + // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to + // access certain type information about T without creating a circular dependency in async + // functions that heap-allocate their own frame with @Frame(func). + const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T); + const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT); + assert(if (exact == .exact) byte_slice.len == byte_count else byte_slice.len >= byte_count); @memset(byte_slice.ptr, undefined, byte_slice.len); if (alignment == null) { - // TODO This is a workaround for zig not being able to successfully do - // @bytesToSlice(T, @alignCast(a, byte_slice)) without resolving alignment of T, - // which causes a circular dependency in async functions which try to heap-allocate - // their own frame with @Frame(func). 
- return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n]; + // This if block is a workaround (see comment above) + return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))]; } else { return mem.bytesAsSlice(T, @alignCast(a, byte_slice)); } @@ -190,7 +273,15 @@ pub const Allocator = struct { break :t Error![]align(Slice.alignment) Slice.child; } { const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment; - return self.alignedRealloc(old_mem, old_alignment, new_n); + return self.alignedRealloc2(old_mem, old_alignment, new_n, .exact); + } + + pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: { + const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; + break :t Error![]align(Slice.alignment) Slice.child; + } { + const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment; + return self.alignedRealloc2(old_mem, old_alignment, new_n, .atLeast); } /// This is the same as `realloc`, except caller may additionally request @@ -201,11 +292,24 @@ pub const Allocator = struct { old_mem: var, comptime new_alignment: u29, new_n: usize, + ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { + return self.alignedRealloc2(old_mem, new_alignment, new_n, .exact); + } + + /// This is the same as `realloc`, except caller may additionally request + /// a new alignment, which can be larger, smaller, or the same as the old + /// allocation. + pub fn alignedRealloc2( + self: *Allocator, + old_mem: var, + comptime new_alignment: u29, + new_n: usize, + exact: Exact, ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; const T = Slice.child; if (old_mem.len == 0) { - return self.alignedAlloc(T, new_alignment, new_n); + return self.alignedAlloc2(T, new_alignment, new_n, exact); } if (new_n == 0) { self.free(old_mem); @@ -215,12 +319,9 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure - const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment); - assert(byte_slice.len == byte_count); - if (new_n > old_mem.len) { - @memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len); - } - return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice)); + const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, + if (exact == .exact) @as(u29, 0) else @sizeOf(T)); + return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice)); } /// Prefer calling realloc to shrink if you can tolerate failure, such as @@ -248,12 +349,9 @@ pub const Allocator = struct { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; const T = Slice.child; - if (new_n == 0) { - self.free(old_mem); - return old_mem[0..0]; - } - - assert(new_n <= old_mem.len); + if (new_n == old_mem.len) + return old_mem; + assert(new_n < old_mem.len); assert(new_alignment <= Slice.alignment); // Here we skip the overflow checking on the multiplication because @@ -262,9 +360,8 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count); - const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment); - assert(byte_slice.len == 
byte_count); - return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice)); + _ = self.shrinkBytes(old_byte_slice, byte_count, 0); + return old_mem[0..new_n]; } /// Free an array allocated with `alloc`. To free a single item, @@ -276,8 +373,7 @@ pub const Allocator = struct { if (bytes_len == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr)); @memset(non_const_ptr, undefined, bytes_len); - const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1); - assert(shrink_result.len == 0); + _ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0); } /// Copies `m` to newly allocated memory. Caller owns the memory. @@ -296,16 +392,107 @@ pub const Allocator = struct { } }; +/// Given a pointer to an allocator, return the *Allocator for it. `allocatorStatePtr` can +/// either be a `*Allocator`, in which case it is returned as-is, otherwise, the address of +/// the `allocator` field is returned. +pub fn getAllocatorPtr(allocatorStatePtr: var) *Allocator { + // allocator must be a pointer or else this function will return a copy of the allocator which + // is not what this is for + const T = @TypeOf(allocatorStatePtr); + switch (@typeInfo(T)) { + .Pointer => {}, + else => @compileError("getAllocatorPtr expects a pointer to an allocator but got: " ++ @typeName(T)), + } + if (T == *Allocator) + return allocatorStatePtr; + return &allocatorStatePtr.allocator; +} + +/// Detects and asserts if the std.mem.Allocator interface is violated +pub fn SanityAllocator(comptime T: type) type { return struct { + const Self = @This(); + allocator: Allocator, + underlying_allocator: T, + pub fn init(allocator: T) @This() { + return .{ + .allocator = .{ + .allocFn = alloc, + .resizeFn = resize, + }, + .underlying_allocator = allocator, + }; + } + fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { + if (T == *Allocator) return self.underlying_allocator; + return getAllocatorPtr(&self.underlying_allocator); + } + pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { + assert(n > 0); + assert(mem.isValidAlign(ptr_align)); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(n, len_align)); + assert(n >= len_align); + } + + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); + if (len_align == 0) { + assert(result.len == n); + } else { + assert(result.len >= n); + assert(mem.isAlignedAnyAlign(result.len, len_align)); + } + return result; + } + pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize { + assert(buf.len > 0); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(new_len, len_align)); + assert(new_len >= len_align); + } + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align); + if (len_align == 0) { + assert(result == new_len); + } else { + assert(result >= new_len); + assert(mem.isAlignedAnyAlign(result, len_align)); + } + return result; + } + pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { + pub fn reset(self: *Self) void { + self.underlying_allocator.reset(); + } + }; +};} + +pub fn sanityWrap(allocator: var) SanityAllocator(@TypeOf(allocator)) { + return SanityAllocator(@TypeOf(allocator)).init(allocator); +} +
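As a usage sketch of the wrapper just defined (mirroring how the tests in this patch use it), `sanityWrap` embeds the wrapped allocator state by value and asserts the interface contract on every call:

    const std = @import("std");

    test "sanityWrap usage sketch" {
        var buf: [256]u8 = undefined;
        var wrapped = std.mem.sanityWrap(std.heap.FixedBufferAllocator.init(&buf));
        const allocator = &wrapped.allocator;
        // Every alloc/resize through `allocator` is checked against the
        // len_align and exact-length rules before reaching the fixed buffer.
        const slice = try allocator.alloc(u8, 16);
        allocator.free(slice);
    }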
+/// An allocator helper function. Adjusts an allocation length to satisfy `len_align`. +/// `full_len` should be the full capacity of the allocation which may be greater +/// than the `len` that was requested. This function should only be used by allocators +/// that are unaffected by `len_align`. +pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { + assert(alloc_len > 0); + assert(alloc_len >= len_align); + assert(full_len >= alloc_len); + if (len_align == 0) + return alloc_len; + const adjusted = alignBackwardAnyAlign(full_len, len_align); + assert(adjusted >= alloc_len); + return adjusted; +} + var failAllocator = Allocator{ - .reallocFn = failAllocatorRealloc, - .shrinkFn = failAllocatorShrink, + .allocFn = failAllocatorAlloc, + .resizeFn = Allocator.noResize, }; -fn failAllocatorRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { +fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 { return error.OutOfMemory; } -fn failAllocatorShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - @panic("failAllocatorShrink should never be called because it cannot allocate"); -} test "mem.Allocator basics" { testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1)); @@ -2190,6 +2377,13 @@ test "alignForward" { testing.expect(alignForward(17, 8) == 24); } +pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { + if (@popCount(usize, alignment) == 1) + return alignBackward(i, alignment); + assert(alignment != 0); + return i - @mod(i, alignment); +} + /// Round an address down to the previous aligned address /// The alignment must be a power of 2 and greater than 0. pub fn alignBackward(addr: usize, alignment: usize) usize { @@ -2206,6 +2400,19 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { return addr & ~(alignment - 1); } +/// Returns whether `alignment` is a valid alignment, meaning it is +/// a positive power of 2. +pub fn isValidAlign(alignment: u29) bool { + return @popCount(u29, alignment) == 1; +} + +pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool { + if (@popCount(usize, alignment) == 1) + return isAligned(i, alignment); + assert(alignment != 0); + return 0 == @mod(i, alignment); +} +
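A short worked example of these helpers (a sketch, not part of the patch): suppose an allocator was asked for 10 bytes but its underlying bucket actually spans 16.

    const std = @import("std");

    test "alignAllocLen worked example" {
        // len_align == 4: any multiple of 4 between 10 and 16 is legal; the
        // helper reports alignBackwardAnyAlign(16, 4), which is 16.
        std.testing.expect(std.mem.alignAllocLen(16, 10, 4) == 16);
        // len_align == 0: the reported length must be exactly what was asked for.
        std.testing.expect(std.mem.alignAllocLen(16, 10, 0) == 10);
        // The "AnyAlign" variants also accept non-power-of-two alignments.
        std.testing.expect(std.mem.alignBackwardAnyAlign(17, 6) == 12);
    }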
/// Given an address and an alignment, return true if the address is a multiple of the alignment /// The alignment must be a power of 2 and greater than 0. pub fn isAligned(addr: usize, alignment: usize) bool { diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig index 191e8deded..a6a9e50d53 100644 --- a/lib/std/os/windows/bits.zig +++ b/lib/std/os/windows/bits.zig @@ -593,6 +593,7 @@ pub const FILE_CURRENT = 1; pub const FILE_END = 2; pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000; +pub const HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010; pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004; pub const HEAP_NO_SERIALIZE = 0x00000001; diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 117a788e16..0f811cada8 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -11,7 +11,7 @@ pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.al pub const failing_allocator = &failing_allocator_instance.allocator; pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0); -pub var base_allocator_instance = std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]); +pub var base_allocator_instance = std.mem.sanityWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..])); var allocator_mem: [2 * 1024 * 1024]u8 = undefined; /// This function is intended to be used only in tests. It prints diagnostics to stderr diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 081a29cd97..ade3e9d85a 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -39,43 +39,38 @@ pub const FailingAllocator = struct { .allocations = 0, .deallocations = 0, .allocator = mem.Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = resize, }, }; } - fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 { const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); if (self.index == self.fail_index) { return error.OutOfMemory; } - const result = try self.internal_allocator.reallocFn( - self.internal_allocator, - old_mem, - old_align, - new_size, - new_align, - ); - if (new_size < old_mem.len) { - self.freed_bytes += old_mem.len - new_size; - if (new_size == 0) - self.deallocations += 1; - } else if (new_size > old_mem.len) { - self.allocated_bytes += new_size - old_mem.len; - if (old_mem.len == 0) - self.allocations += 1; - } + const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align); + self.allocated_bytes += result.len; + self.allocations += 1; self.index += 1; return result; } - fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); - const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align); - self.freed_bytes += old_mem.len - r.len; - if (new_size == 0) + const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| { + std.debug.assert(new_len > old_mem.len); + return e; + }; + if (new_len == 0) { self.deallocations += 1; + self.freed_bytes += old_mem.len; + } else if (r < old_mem.len) { + self.freed_bytes += old_mem.len - r; + } else { + self.allocated_bytes += r - old_mem.len; + } return r; } }; diff --git
a/lib/std/testing/leak_count_allocator.zig b/lib/std/testing/leak_count_allocator.zig index 65244e529b..87564aeea7 100644 --- a/lib/std/testing/leak_count_allocator.zig +++ b/lib/std/testing/leak_count_allocator.zig @@ -14,23 +14,21 @@ pub const LeakCountAllocator = struct { return .{ .count = 0, .allocator = .{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .resizeFn = resize, }, .internal_allocator = allocator, }; } - fn realloc(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 { const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator); - var data = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, old_align, new_size, new_align); - if (old_mem.len == 0) { - self.count += 1; - } - return data; + const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align); + self.count += 1; + return ptr; } - fn shrink(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator); if (new_size == 0) { if (self.count == 0) { @@ -38,7 +36,10 @@ pub const LeakCountAllocator = struct { } self.count -= 1; } - return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align); + return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| { + std.debug.assert(new_size > old_mem.len); + return e; + }; } pub fn validate(self: LeakCountAllocator) !void { -- cgit v1.2.3 From a728436992415d1bce44b0c63938f6443a4e9a11 Mon Sep 17 00:00:00 2001 From: Jonathan Marler Date: Fri, 26 Jun 2020 19:29:06 -0600 Subject: new allocator interface after Andrew Kelley review --- lib/std/array_list.zig | 2 -- lib/std/heap.zig | 42 ++++++++++++-------------- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/logging_allocator.zig | 2 +- lib/std/mem.zig | 62 +++++++++++++++++--------------------- lib/std/testing.zig | 2 +- 6 files changed, 51 insertions(+), 61 deletions(-) (limited to 'lib/std/array_list.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 896e5bf9dc..b57d051d2b 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -220,7 +220,6 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { } const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity); - assert(new_memory.len >= better_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } @@ -443,7 +442,6 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ } const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity); - assert(new_memory.len >= better_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 322c24934e..70e0e9d538 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -25,7 +25,7 @@ usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct { pub const supports_malloc_size = false; }; -pub const c_allocator = mem.getAllocatorPtr(&c_allocator_state); +pub const c_allocator = &c_allocator_state; var c_allocator_state = Allocator{ .allocFn = cAlloc, .resizeFn = cResize, @@ -38,7 +38,7 @@ 
fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato return ptr[0..len]; } const full_len = init: { - if (comptime supports_malloc_size) { + if (supports_malloc_size) { const s = malloc_size(ptr); assert(s >= len); break :init s; @@ -56,24 +56,23 @@ fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocato if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } - if (comptime supports_malloc_size) { + if (supports_malloc_size) { const full_len = malloc_size(buf.ptr); if (new_len <= full_len) { return mem.alignAllocLen(full_len, new_len, len_align); } } - // TODO: could we still use realloc? are there any cases where we can guarantee that realloc won't move memory? return error.OutOfMemory; } /// This allocator makes a syscall directly for every allocation and free. /// Thread-safe and lock-free. pub const page_allocator = if (std.Target.current.isWasm()) - mem.getAllocatorPtr(&wasm_page_allocator_state) + &wasm_page_allocator_state else if (std.Target.current.os.tag == .freestanding) root.os.heap.page_allocator else - mem.getAllocatorPtr(&page_allocator_state); + &page_allocator_state; var page_allocator_state = Allocator{ .allocFn = PageAllocator.alloc, @@ -507,9 +506,9 @@ pub const FixedBufferAllocator = struct { return sliceContainsSlice(self.buffer, slice); } - // NOTE: this will not work in all cases, if the last allocation had an adjusted_index - // then we won't be able to determine what the last allocation was. This is because - // the alignForward operation done in alloc is not reversible. + /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index + /// then we won't be able to determine what the last allocation was. This is because + /// the alignForward operation done in alloc is not reversible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { return buf.ptr + buf.len == self.buffer.ptr + self.end_index; } @@ -525,7 +524,7 @@ pub const FixedBufferAllocator = struct { const result = self.buffer[adjusted_index..new_end_index]; self.end_index = new_end_index; - return result[0..mem.alignAllocLen(result.len, n, len_align)]; + return result; } fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize { @@ -544,13 +543,12 @@ pub const FixedBufferAllocator = struct { return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align); } - var add = new_size - buf.len; + const add = new_size - buf.len; if (add + self.end_index > self.buffer.len) { - //add = self.buffer.len - self.end_index; return error.OutOfMemory; } self.end_index += add; - return mem.alignAllocLen(buf.len + add, new_size, len_align); + return new_size; } pub fn reset(self: *FixedBufferAllocator) void { @@ -735,7 +733,7 @@ test "ArenaAllocator" { var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; test "FixedBufferAllocator" { - var fixed_buffer_allocator = mem.sanityWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); + var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); try testAllocator(&fixed_buffer_allocator.allocator); try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16); @@ -802,8 +800,8 @@ test "ThreadSafeFixedBufferAllocator" { } fn testAllocator(base_allocator: *mem.Allocator) !void { - var sanityAllocator = mem.sanityWrap(base_allocator); - const allocator = &sanityAllocator.allocator; + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = &validationAllocator.allocator; var slice = try allocator.alloc(*i32, 100); testing.expect(slice.len == 100); @@ -833,8 +831,8 @@ fn testAllocator(base_allocator: *mem.Allocator) !void { } fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void { - var sanityAllocator = mem.sanityWrap(base_allocator); - const allocator = &sanityAllocator.allocator; + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = &validationAllocator.allocator; // initial var slice = try allocator.alignedAlloc(u8, alignment, 10); @@ -860,8 +858,8 @@ fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) } fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void { - var sanityAllocator = mem.sanityWrap(base_allocator); - const allocator = &sanityAllocator.allocator; + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = &validationAllocator.allocator; //Maybe a platform's page_size is actually the same as or // very near usize? 
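The resize rules above mean a FixedBufferAllocator can only grow a buffer in place when it was the most recent allocation. A sketch of what a caller observes (illustrative, not part of the patch):

    const std = @import("std");

    test "FixedBufferAllocator resize sketch" {
        var buf: [64]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const allocator = &fba.allocator;

        var a = try allocator.alloc(u8, 8);
        // `a` is the last allocation, so it may grow in place.
        a.len = try allocator.callResizeFn(a, 16, 0);

        const b = try allocator.alloc(u8, 8);
        // `a` is no longer the last allocation; growing it must fail.
        std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 32, 0));
        allocator.free(b);
    }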
@@ -892,8 +890,8 @@ fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Err } fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void { - var sanityAllocator = mem.sanityWrap(base_allocator); - const allocator = &sanityAllocator.allocator; + var validationAllocator = mem.validationWrap(base_allocator); + const allocator = &validationAllocator.allocator; var debug_buffer: [1000]u8 = undefined; const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator; diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index ee3bfc19d4..74ced774ed 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -77,7 +77,7 @@ pub const ArenaAllocator = struct { } const result = cur_buf[adjusted_index..new_end_index]; self.state.end_index = new_end_index; - return result[0..mem.alignAllocLen(result.len, n, len_align)]; + return result; } } }; diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index e465795b8f..b521515a79 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -70,7 +70,7 @@ test "LoggingAllocator" { var fbs = std.io.fixedBufferStream(&log_buf); var allocator_buf: [10]u8 = undefined; - var fixedBufferAllocator = std.mem.sanityWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); + var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator; var a = try allocator.alloc(u8, 10); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 6a7e846f62..bf1e000056 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -141,6 +141,9 @@ pub const Allocator = struct { const new_mem = try self.callAllocFn(new_len, new_alignment, len_align); @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len)); // DISABLED TO AVOID BUGS IN TRANSLATE C + // use './zig build test-translate-c' to reproduce, some of the symbols in the + // generated C code will be a sequence of 0xaa (the undefined value), meaning + // it is printing data that has been freed //@memset(old_mem.ptr, undefined, old_mem.len); _ = self.shrinkBytes(old_mem, 0, 0); return new_mem; @@ -214,6 +217,7 @@ pub const Allocator = struct { return self.allocWithOptions(Elem, n, null, sentinel); } + /// Deprecated: use `allocAdvanced` pub fn alignedAlloc( self: *Allocator, comptime T: type, @@ -221,11 +225,11 @@ pub const Allocator = struct { comptime alignment: ?u29, n: usize, ) Error![]align(alignment orelse @alignOf(T)) T { - return self.alignedAlloc2(T, alignment, n, .exact); + return self.allocAdvanced(T, alignment, n, .exact); } - const Exact = enum {exact,atLeast}; - pub fn alignedAlloc2( + const Exact = enum {exact,at_least}; + pub fn allocAdvanced( self: *Allocator, comptime T: type, /// null means naturally aligned @@ -234,7 +238,7 @@ pub const Allocator = struct { exact: Exact, ) Error![]align(alignment orelse @alignOf(T)) T { const a = if (alignment) |a| blk: { - if (a == @alignOf(T)) return alignedAlloc2(self, T, null, n, exact); + if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact); break :blk a; } else @alignOf(T); @@ -248,7 +252,10 @@ pub const Allocator = struct { // functions that heap-allocate their own frame with @Frame(func). 
const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T); const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT); - assert(if (exact == .exact) byte_slice.len == byte_count else byte_slice.len >= byte_count); + switch (exact) { + .exact => assert(byte_slice.len == byte_count), + .at_least => assert(byte_slice.len >= byte_count), + } @memset(byte_slice.ptr, undefined, byte_slice.len); if (alignment == null) { // This if block is a workaround (see comment above) @@ -273,7 +280,7 @@ pub const Allocator = struct { break :t Error![]align(Slice.alignment) Slice.child; } { const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment; - return self.alignedRealloc2(old_mem, old_alignment, new_n, .exact); + return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact); } pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: { @@ -281,25 +288,23 @@ pub const Allocator = struct { break :t Error![]align(Slice.alignment) Slice.child; } { const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment; - return self.alignedRealloc2(old_mem, old_alignment, new_n, .atLeast); + return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least); } - /// This is the same as `realloc`, except caller may additionally request - /// a new alignment, which can be larger, smaller, or the same as the old - /// allocation. + // Deprecated: use `reallocAdvanced` pub fn alignedRealloc( self: *Allocator, old_mem: var, comptime new_alignment: u29, new_n: usize, ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { - return self.alignedRealloc2(old_mem, new_alignment, new_n, .exact); + return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact); } /// This is the same as `realloc`, except caller may additionally request /// a new alignment, which can be larger, smaller, or the same as the old /// allocation. - pub fn alignedRealloc2( + pub fn reallocAdvanced( self: *Allocator, old_mem: var, comptime new_alignment: u29, @@ -309,7 +314,7 @@ pub const Allocator = struct { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; const T = Slice.child; if (old_mem.len == 0) { - return self.alignedAlloc2(T, new_alignment, new_n, exact); + return self.allocAdvanced(T, new_alignment, new_n, exact); } if (new_n == 0) { self.free(old_mem); @@ -392,24 +397,9 @@ pub const Allocator = struct { } }; -/// Given a pointer to an allocator, return the *Allocator for it. `allocatorStatePtr` can -/// either be a `*Allocator`, in which case it is returned as-is, otherwise, the address of -/// the `allocator` field is returned. -pub fn getAllocatorPtr(allocatorStatePtr: var) *Allocator { - // allocator must be a pointer or else this function will return a copy of the allocator which - // is not what this is for - const T = @TypeOf(allocatorStatePtr); - switch (@typeInfo(T)) { - .Pointer => {}, - else => @compileError("getAllocatorPtr expects a pointer to an allocator but got: " ++ @typeName(T)), - } - if (T == *Allocator) - return allocatorStatePtr; - return &allocatorStatePtr.allocator; -} - -/// Detects and asserts if the std.mem.Allocator interface is violated -pub fn SanityAllocator(comptime T: type) type { return struct { +/// Detects and asserts if the std.mem.Allocator interface is violated by the caller +/// or the allocator. 
+pub fn ValidationAllocator(comptime T: type) type { return struct { const Self = @This(); allocator: Allocator, underlying_allocator: T, @@ -424,7 +414,8 @@ pub fn SanityAllocator(comptime T: type) type { return struct { } fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { if (T == *Allocator) return self.underlying_allocator; - return getAllocatorPtr(&self.underlying_allocator); + if (*T == *Allocator) return &self.underlying_allocator; + return &self.underlying_allocator.allocator; } pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { assert(n > 0); @@ -436,6 +427,7 @@ pub fn SanityAllocator(comptime T: type) type { return struct { const self = @fieldParentPtr(@This(), "allocator", allocator); const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); + assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); if (len_align == 0) { assert(result.len == n); } else { @@ -467,8 +459,8 @@ pub fn SanityAllocator(comptime T: type) type { return struct { }; };} -pub fn sanityWrap(allocator: var) SanityAllocator(@TypeOf(allocator)) { - return SanityAllocator(@TypeOf(allocator)).init(allocator); +pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) { + return ValidationAllocator(@TypeOf(allocator)).init(allocator); } /// An allocator helper function. Adjusts an allocation length to satisfy `len_align`. @@ -2377,6 +2369,8 @@ test "alignForward" { testing.expect(alignForward(17, 8) == 24); } +/// Round an address down to the previous aligned address +/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (@popCount(usize, alignment) == 1) return alignBackward(i, alignment); diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 0f811cada8..670d8fd5d6 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -11,7 +11,7 @@ pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.al pub const failing_allocator = &failing_allocator_instance.allocator; pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0); -pub var base_allocator_instance = std.mem.sanityWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..])); +pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..])); var allocator_mem: [2 * 1024 * 1024]u8 = undefined; /// This function is intended to be used only in tests. It prints diagnostics to stderr -- cgit v1.2.3 From b3b6ccba50ef7a683ad05546cba2b71e7d10489f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 3 Jul 2020 23:57:24 +0000 Subject: reimplement std.HashMap * breaking changes to the API. Some of the weird decisions from before are changed to what would be more expected. - `get` returns `?V`, use `getEntry` for the old API. - `put` returns `!void`, use `fetchPut` for the old API. * HashMap now has a comptime parameter of whether to store hashes with entries. AutoHashMap has heuristics on whether to set this parameter. For example, for integers, it is false, since equality checking is cheap, but for strings, it is true, since equality checking is probably expensive. * The implementation has a separate array for entry_index / distance_from_start_index. The entries array no longer has holes; it is an ArrayList, and iteration is simpler and more cache coherent. This is inspired by Python's new dictionaries.
* HashMap is separated into an "unmanaged" and a "managed" API. The unmanaged API is where the actual implementation is; the managed API wraps it and provides a more convenient API, storing the allocator. * Memory usage: When there are less than or equal to 8 entries, HashMap now incurs only a single pointer-size integer as overhead, as opposed to using an ArrayList. * Since the entries array is separate from the indexes array, the holes in the indexes array take up less room than the holes in the entries array otherwise would. However, the entries array also allocates additional capacity for appending into the array. * HashMap now maintains insertion order. Deletion performs a "swap remove". It's now possible to modify the HashMap while iterating. --- lib/std/array_list.zig | 16 + lib/std/debug.zig | 2 +- lib/std/hash_map.zig | 1058 ++++++++++++++++++++++++++++++++++-------------- 3 files changed, 766 insertions(+), 310 deletions(-) (limited to 'lib/std/array_list.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index b57d051d2b..a68c1fa9d6 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -210,6 +210,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { self.capacity = new_len; } + /// Reduce length to `new_len`. + /// Invalidates element pointers. + /// Keeps capacity the same. + pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + self.items.len = new_len; + } + pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; @@ -432,6 +440,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ self.capacity = new_len; } + /// Reduce length to `new_len`. + /// Invalidates element pointers. + /// Keeps capacity the same. + pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + self.items.len = new_len; + } + pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 92b79be35c..e9bafec94c 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1278,7 +1278,7 @@ pub const DebugInfo = struct { else => return error.MissingDebugInfo, } - if (self.address_map.getValue(ctx.base_address)) |obj_di| { + if (self.address_map.get(ctx.base_address)) |obj_di| { return obj_di; } diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index bcd4280153..4b91a83ba2 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -9,17 +9,15 @@ const autoHash = std.hash.autoHash; const Wyhash = std.hash.Wyhash; const Allocator = mem.Allocator; const builtin = @import("builtin"); - -const want_modification_safety = std.debug.runtime_safety; -const debug_u32 = if (want_modification_safety) u32 else void; +const hash_map = @This(); pub fn AutoHashMap(comptime K: type, comptime V: type) type { - return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K)); + return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K)); } /// Builtin hashmap for strings as keys.
pub fn StringHashMap(comptime V: type) type { - return HashMap([]const u8, V, hashString, eqlString); + return HashMap([]const u8, V, hashString, eqlString, true); } pub fn eqlString(a: []const u8, b: []const u8) bool { @@ -30,422 +28,846 @@ pub fn hashString(s: []const u8) u32 { return @truncate(u32, std.hash.Wyhash.hash(0, s)); } -pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type { +/// Insertion order is preserved. +/// Deletions perform a "swap removal" on the entries list. +/// Modifying the hash map while iterating is allowed, however one must understand +/// the (well defined) behavior when mixing insertions and deletions with iteration. +/// For a hash map that can be initialized directly that does not store an Allocator +/// field, see `HashMapUnmanaged`. +/// When `store_hash` is `false`, this data structure is biased towards cheap `eql` +/// functions. It does not store each item's hash in the table. Setting `store_hash` +/// to `true` incurs slightly more memory cost by storing each key's hash in the table +/// but only has to call `eql` for hash collisions. +pub fn HashMap( + comptime K: type, + comptime V: type, + comptime hash: fn (key: K) u32, + comptime eql: fn (a: K, b: K) bool, + comptime store_hash: bool, +) type { return struct { - entries: []Entry, - size: usize, - max_distance_from_start_index: usize, + unmanaged: Unmanaged, allocator: *Allocator, - /// This is used to detect bugs where a hashtable is edited while an iterator is running. - modification_count: debug_u32, - - const Self = @This(); - - /// A *KV is a mutable pointer into this HashMap's internal storage. - /// Modifying the key is undefined behavior. - /// Modifying the value is harmless. - /// *KV pointers become invalid whenever this HashMap is modified, - /// and then any access to the *KV is undefined behavior. - pub const KV = struct { - key: K, - value: V, - }; - - const Entry = struct { - used: bool, - distance_from_start_index: usize, - kv: KV, - }; - - pub const GetOrPutResult = struct { - kv: *KV, - found_existing: bool, - }; + pub const Unmanaged = HashMapUnmanaged(K, V, hash, eql, store_hash); + pub const Entry = Unmanaged.Entry; + pub const Hash = Unmanaged.Hash; + pub const GetOrPutResult = Unmanaged.GetOrPutResult; + /// Deprecated. Iterate using `items`. pub const Iterator = struct { hm: *const Self, - // how many items have we returned - count: usize, - // iterator through the entry array + /// Iterator through the entry array. 
index: usize, - // used to detect concurrent modification - initial_modification_count: debug_u32, - pub fn next(it: *Iterator) ?*KV { - if (want_modification_safety) { - assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification - } - if (it.count >= it.hm.size) return null; - while (it.index < it.hm.entries.len) : (it.index += 1) { - const entry = &it.hm.entries[it.index]; - if (entry.used) { - it.index += 1; - it.count += 1; - return &entry.kv; - } - } - unreachable; // no next item + pub fn next(it: *Iterator) ?*Entry { + if (it.index >= it.hm.unmanaged.entries.items.len) return null; + const result = &it.hm.unmanaged.entries.items[it.index]; + it.index += 1; + return result; } - // Reset the iterator to the initial index + /// Reset the iterator to the initial index pub fn reset(it: *Iterator) void { - it.count = 0; it.index = 0; - // Resetting the modification count too - it.initial_modification_count = it.hm.modification_count; } }; + const Self = @This(); + const Index = Unmanaged.Index; + pub fn init(allocator: *Allocator) Self { - return Self{ - .entries = &[_]Entry{}, + return .{ + .unmanaged = .{}, .allocator = allocator, - .size = 0, - .max_distance_from_start_index = 0, - .modification_count = if (want_modification_safety) 0 else {}, }; } - pub fn deinit(hm: Self) void { - hm.allocator.free(hm.entries); + pub fn deinit(self: *Self) void { + self.unmanaged.deinit(self.allocator); + self.* = undefined; } - pub fn clear(hm: *Self) void { - for (hm.entries) |*entry| { - entry.used = false; - } - hm.size = 0; - hm.max_distance_from_start_index = 0; - hm.incrementModificationCount(); + pub fn clearRetainingCapacity(self: *Self) void { + return self.unmanaged.clearRetainingCapacity(); } + pub fn clearAndFree(self: *Self) void { + return self.unmanaged.clearAndFree(self.allocator); + } + + /// Deprecated. Use `items().len`. pub fn count(self: Self) usize { - return self.size; + return self.items().len; + } + + /// Deprecated. Iterate using `items`. + pub fn iterator(self: *const Self) Iterator { + return Iterator{ + .hm = self, + .index = 0, + }; } /// If key exists this function cannot fail. /// If there is an existing item with `key`, then the result - /// kv pointer points to it, and found_existing is true. + /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and - /// the kv pointer points to it. Caller should then initialize - /// the data. + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). pub fn getOrPut(self: *Self, key: K) !GetOrPutResult { - // TODO this implementation can be improved - we should only - // have to hash once and find the entry once. - if (self.get(key)) |kv| { - return GetOrPutResult{ - .kv = kv, - .found_existing = true, - }; - } - self.incrementModificationCount(); - try self.autoCapacity(); - const put_result = self.internalPut(key); - assert(put_result.old_kv == null); - return GetOrPutResult{ - .kv = &put_result.new_entry.kv, - .found_existing = false, + return self.unmanaged.getOrPut(self.allocator, key); + } +
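The getOrPut family above is the intended replacement for the old hash-twice-and-put idiom. A sketch of typical use (assuming `std.testing.allocator`) is counting occurrences with a single lookup per key:

    const std = @import("std");

    test "getOrPut counting sketch" {
        var counts = std.AutoHashMap(u32, u32).init(std.testing.allocator);
        defer counts.deinit();
        for ([_]u32{ 1, 2, 1, 3, 1 }) |key| {
            // One hash/find per key, whether or not it already exists.
            const gop = try counts.getOrPut(key);
            if (!gop.found_existing) gop.entry.value = 0;
            gop.entry.value += 1;
        }
        std.testing.expect(counts.get(1).? == 3);
    }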
+ /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + /// If a new entry needs to be stored, this function asserts there + /// is enough capacity to store it. + pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { + return self.unmanaged.getOrPutAssumeCapacity(key); + } + + pub fn getOrPutValue(self: *Self, key: K, value: V) !*Entry { + return self.unmanaged.getOrPutValue(self.allocator, key, value); + } + + /// Increases capacity, guaranteeing that insertions up until the + /// `expected_count` will not cause an allocation, and therefore cannot fail. + pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { + return self.unmanaged.ensureCapacity(self.allocator, new_capacity); + } + + /// Returns the number of total elements which may be present before it is + /// no longer guaranteed that no allocations will be performed. + pub fn capacity(self: *Self) usize { + return self.unmanaged.capacity(); + } + + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPut`. + pub fn put(self: *Self, key: K, value: V) !void { + return self.unmanaged.put(self.allocator, key, value); + } + + /// Inserts a key-value pair into the hash map, asserting that no previous + /// entry with the same key is already present + pub fn putNoClobber(self: *Self, key: K, value: V) !void { + return self.unmanaged.putNoClobber(self.allocator, key, value); + } + + /// Asserts there is enough capacity to store the new key-value pair. + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { + return self.unmanaged.putAssumeCapacity(key, value); + } + + /// Asserts there is enough capacity to store the new key-value pair. + /// Asserts that it does not clobber any existing data. + /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { + return self.unmanaged.putAssumeCapacityNoClobber(key, value); + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + pub fn fetchPut(self: *Self, key: K, value: V) !?Entry { + return self.unmanaged.fetchPut(self.allocator, key, value); + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + /// If insertion happens, asserts there is enough capacity without allocating. + pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry { + return self.unmanaged.fetchPutAssumeCapacity(key, value); + } + + pub fn getEntry(self: Self, key: K) ?*Entry { + return self.unmanaged.getEntry(key); + } + + pub fn get(self: Self, key: K) ?V { + return self.unmanaged.get(key); + } + + pub fn contains(self: Self, key: K) bool { + return self.unmanaged.contains(key); + } + + /// If there is an `Entry` with a matching key, it is deleted from + /// the hash map, and then returned from this function. + pub fn remove(self: *Self, key: K) ?Entry { + return self.unmanaged.remove(key); + } + + /// Asserts there is an `Entry` with matching key, deletes it from the hash map, + /// and discards it. + pub fn removeAssertDiscard(self: *Self, key: K) void { + return self.unmanaged.removeAssertDiscard(key); + } + + pub fn items(self: Self) []Entry { + return self.unmanaged.items(); + } + + pub fn clone(self: Self) !Self { + var other = try self.unmanaged.clone(self.allocator); + return other.promote(self.allocator); + } + }; +} +
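In practice, the breaking changes called out in the commit message read like this (a sketch, assuming `std.testing.allocator`):

    const std = @import("std");

    test "reworked HashMap API sketch" {
        var map = std.AutoHashMap(i32, []const u8).init(std.testing.allocator);
        defer map.deinit();

        // `put` now returns !void; `fetchPut` returns the previous Entry, if any.
        try map.put(1, "one");
        const prev = try map.fetchPut(1, "uno");
        std.testing.expect(std.mem.eql(u8, prev.?.value, "one"));

        // `get` now returns ?V; `getEntry` provides the old pointer-returning API.
        std.testing.expect(std.mem.eql(u8, map.get(1).?, "uno"));
        std.testing.expect(map.getEntry(2) == null);
    }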
+/// General purpose hash table. +/// Insertion order is preserved. +/// Deletions perform a "swap removal" on the entries list. +/// Modifying the hash map while iterating is allowed, however one must understand +/// the (well defined) behavior when mixing insertions and deletions with iteration. +/// This type does not store an Allocator field - the Allocator must be passed in +/// with each function call that requires it. See `HashMap` for a type that stores +/// an Allocator field for convenience. +/// Can be initialized directly using the default field values. +/// This type is designed to have low overhead for small numbers of entries. When +/// `store_hash` is `false` and the number of entries in the map is less than 9, +/// the overhead cost of using `HashMapUnmanaged` rather than `std.ArrayList` is +/// only a single pointer-sized integer. +/// When `store_hash` is `false`, this data structure is biased towards cheap `eql` +/// functions. It does not store each item's hash in the table. Setting `store_hash` +/// to `true` incurs slightly more memory cost by storing each key's hash in the table +/// but guarantees only one call to `eql` per insertion/deletion. +pub fn HashMapUnmanaged( + comptime K: type, + comptime V: type, + comptime hash: fn (key: K) u32, + comptime eql: fn (a: K, b: K) bool, + comptime store_hash: bool, +) type { + return struct { + /// It is permitted to access this field directly. + entries: std.ArrayListUnmanaged(Entry) = .{}, + + /// When entries length is less than `linear_scan_max`, this remains `null`. + /// Once entries length grows big enough, this field is allocated. There is + /// an IndexHeader followed by an array of Index(I) structs, where I is defined + /// by how many total indexes there are. + index_header: ?*IndexHeader = null, + + /// Modifying the key is illegal behavior. + /// Modifying the value is allowed. + /// Entry pointers become invalid whenever this HashMap is modified, + /// unless `ensureCapacity` was previously used. + pub const Entry = struct { + /// This field is `void` if `store_hash` is `false`.
+ hash: Hash, + key: K, + value: V, + }; + + pub const Hash = if (store_hash) u32 else void; + + pub const GetOrPutResult = struct { + entry: *Entry, + found_existing: bool, + }; + + pub const Managed = HashMap(K, V, hash, eql, store_hash); + + const Self = @This(); + + const linear_scan_max = 8; + + pub fn promote(self: Self, allocator: *Allocator) Managed { + return .{ + .unmanaged = self, + .allocator = allocator, }; } - pub fn getOrPutValue(self: *Self, key: K, value: V) !*KV { - const res = try self.getOrPut(key); - if (!res.found_existing) - res.kv.value = value; + pub fn deinit(self: *Self, allocator: *Allocator) void { + self.entries.deinit(allocator); + if (self.index_header) |header| { + header.free(allocator); + } + self.* = undefined; + } - return res.kv; + pub fn clearRetainingCapacity(self: *Self) void { + self.entries.items.len = 0; + if (self.index_header) |header| { + header.max_distance_from_start_index = 0; + const indexes = header.indexes(u8); + @memset(indexes.ptr, 0xff, indexes.len); + } } - fn optimizedCapacity(expected_count: usize) usize { - // ensure that the hash map will be at most 60% full if - // expected_count items are put into it - var optimized_capacity = expected_count * 5 / 3; - // an overflow here would mean the amount of memory required would not - // be representable in the address space - return math.ceilPowerOfTwo(usize, optimized_capacity) catch unreachable; + pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + self.entries.shrink(allocator, 0); + if (self.index_header) |header| { + header.free(allocator); + self.index_header = null; + } } - /// Increases capacity so that the hash map will be at most - /// 60% full when expected_count items are put into it - pub fn ensureCapacity(self: *Self, expected_count: usize) !void { - if (expected_count == 0) return; - const optimized_capacity = optimizedCapacity(expected_count); - return self.ensureCapacityExact(optimized_capacity); + /// If key exists this function cannot fail. + /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { + self.ensureCapacity(allocator, self.entries.items.len + 1) catch |err| { + // "If key exists this function cannot fail." + return GetOrPutResult{ + .entry = self.getEntry(key) orelse return err, + .found_existing = true, + }; + }; + return self.getOrPutAssumeCapacity(key); } - /// Sets the capacity to the new capacity if the new - /// capacity is greater than the current capacity. - /// New capacity must be a power of two. - fn ensureCapacityExact(self: *Self, new_capacity: usize) !void { - // capacity must always be a power of two to allow for modulo - // optimization in the constrainIndex fn - assert(math.isPowerOfTwo(new_capacity)); + /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + /// If a new entry needs to be stored, this function asserts there + /// is enough capacity to store it. + pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { + const header = self.index_header orelse { + // Linear scan.
+ const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |*item| { + if (item.hash == h and eql(key, item.key)) { + return GetOrPutResult{ + .entry = item, + .found_existing = true, + }; + } + } + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + return GetOrPutResult{ + .entry = new_entry, + .found_existing = false, + }; + }; - if (new_capacity <= self.entries.len) { - return; + switch (header.capacityIndexType()) { + .u8 => return self.getOrPutInternal(key, header, u8), + .u16 => return self.getOrPutInternal(key, header, u16), + .u32 => return self.getOrPutInternal(key, header, u32), + .usize => return self.getOrPutInternal(key, header, usize), } + } - const old_entries = self.entries; - try self.initCapacity(new_capacity); - self.incrementModificationCount(); - if (old_entries.len > 0) { - // dump all of the old elements into the new table - for (old_entries) |*old_entry| { - if (old_entry.used) { - self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value; + pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !*Entry { + const res = try self.getOrPut(allocator, key); + if (!res.found_existing) + res.entry.value = value; + + return res.entry; + } + + /// Increases capacity, guaranteeing that insertions up until the + /// `expected_count` will not cause an allocation, and therefore cannot fail. + pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { + try self.entries.ensureCapacity(allocator, new_capacity); + if (new_capacity <= linear_scan_max) return; + + // Resize if indexes would be more than 75% full. + const needed_len = new_capacity * 4 / 3; + if (self.index_header) |header| { + if (needed_len > header.indexes_len) { + var new_indexes_len = header.indexes_len; + while (true) { + new_indexes_len += new_indexes_len / 2 + 8; + if (new_indexes_len >= needed_len) break; } + const new_header = try IndexHeader.alloc(allocator, new_indexes_len); + self.insertAllEntriesIntoNewHeader(new_header); + header.free(allocator); + self.index_header = new_header; } - self.allocator.free(old_entries); + } else { + const header = try IndexHeader.alloc(allocator, needed_len); + self.insertAllEntriesIntoNewHeader(header); + self.index_header = header; } } - /// Returns the kv pair that was already there. - pub fn put(self: *Self, key: K, value: V) !?KV { - try self.autoCapacity(); - return putAssumeCapacity(self, key, value); + /// Returns the number of total elements which may be present before it is + /// no longer guaranteed that no allocations will be performed. + pub fn capacity(self: Self) usize { + const entry_cap = self.entries.capacity; + const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const indexes_cap = (header.indexes_len + 1) * 3 / 4; + return math.min(entry_cap, indexes_cap); } - /// Calls put() and asserts that no kv pair is clobbered. - pub fn putNoClobber(self: *Self, key: K, value: V) !void { - assert((try self.put(key, value)) == null); + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPut`. 
+ pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { + const result = try self.getOrPut(allocator, key); + result.entry.value = value; } - pub fn putAssumeCapacity(self: *Self, key: K, value: V) ?KV { - assert(self.count() < self.entries.len); - self.incrementModificationCount(); + /// Inserts a key-value pair into the hash map, asserting that no previous + /// entry with the same key is already present + pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { + const result = try self.getOrPut(allocator, key); + assert(!result.found_existing); + result.entry.value = value; + } - const put_result = self.internalPut(key); - put_result.new_entry.kv.value = value; - return put_result.old_kv; + /// Asserts there is enough capacity to store the new key-value pair. + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { + const result = self.getOrPutAssumeCapacity(key); + result.entry.value = value; } + /// Asserts there is enough capacity to store the new key-value pair. + /// Asserts that it does not clobber any existing data. + /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { - assert(self.putAssumeCapacity(key, value) == null); + const result = self.getOrPutAssumeCapacity(key); + assert(!result.found_existing); + result.entry.value = value; } - pub fn get(hm: *const Self, key: K) ?*KV { - if (hm.entries.len == 0) { + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?Entry { + const gop = try self.getOrPut(allocator, key); + var result: ?Entry = null; + if (gop.found_existing) { + result = gop.entry.*; + } + gop.entry.value = value; + return result; + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + /// If insertion happens, asserts there is enough capacity without allocating. + pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry { + const gop = self.getOrPutAssumeCapacity(key); + var result: ?Entry = null; + if (gop.found_existing) { + result = gop.entry.*; + } + gop.entry.value = value; + return result; + } + + pub fn getEntry(self: Self, key: K) ?*Entry { + const header = self.index_header orelse { + // Linear scan. + const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |*item| { + if (item.hash == h and eql(key, item.key)) { + return item; + } + } return null; + }; + + switch (header.capacityIndexType()) { + .u8 => return self.getInternal(key, header, u8), + .u16 => return self.getInternal(key, header, u16), + .u32 => return self.getInternal(key, header, u32), + .usize => return self.getInternal(key, header, usize), } - return hm.internalGet(key); } - pub fn getValue(hm: *const Self, key: K) ?V { - return if (hm.get(key)) |kv| kv.value else null; + pub fn get(self: Self, key: K) ?V { + return if (self.getEntry(key)) |entry| entry.value else null; } - pub fn contains(hm: *const Self, key: K) bool { - return hm.get(key) != null; + pub fn contains(self: Self, key: K) bool { + return self.getEntry(key) != null; } - /// Returns any kv pair that was removed.
- pub fn remove(hm: *Self, key: K) ?KV { - if (hm.entries.len == 0) return null; - hm.incrementModificationCount(); - const start_index = hm.keyToIndex(key); - { - var roll_over: usize = 0; - while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) { - const index = hm.constrainIndex(start_index + roll_over); - var entry = &hm.entries[index]; - - if (!entry.used) return null; - - if (!eql(entry.kv.key, key)) continue; - - const removed_kv = entry.kv; - while (roll_over < hm.entries.len) : (roll_over += 1) { - const next_index = hm.constrainIndex(start_index + roll_over + 1); - const next_entry = &hm.entries[next_index]; - if (!next_entry.used or next_entry.distance_from_start_index == 0) { - entry.used = false; - hm.size -= 1; - return removed_kv; - } - entry.* = next_entry.*; - entry.distance_from_start_index -= 1; - entry = next_entry; + /// If there is an `Entry` with a matching key, it is deleted from + /// the hash map, and then returned from this function. + pub fn remove(self: *Self, key: K) ?Entry { + const header = self.index_header orelse { + // Linear scan. + const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |item, i| { + if (item.hash == h and eql(key, item.key)) { + return self.entries.swapRemove(i); } - unreachable; // shifting everything in the table } + return null; + }; + switch (header.capacityIndexType()) { + .u8 => return self.removeInternal(key, header, u8), + .u16 => return self.removeInternal(key, header, u16), + .u32 => return self.removeInternal(key, header, u32), + .usize => return self.removeInternal(key, header, usize), } - return null; } - /// Calls remove(), asserts that a kv pair is removed, and discards it. - pub fn removeAssertDiscard(hm: *Self, key: K) void { - assert(hm.remove(key) != null); + /// Asserts there is an `Entry` with matching key, deletes it from the hash map, + /// and discards it. + pub fn removeAssertDiscard(self: *Self, key: K) void { + assert(self.remove(key) != null); } - pub fn iterator(hm: *const Self) Iterator { - return Iterator{ - .hm = hm, - .count = 0, - .index = 0, - .initial_modification_count = hm.modification_count, - }; + pub fn items(self: Self) []Entry { + return self.entries.items; } - pub fn clone(self: Self) !Self { - var other = Self.init(self.allocator); - try other.initCapacity(self.entries.len); - var it = self.iterator(); - while (it.next()) |entry| { - try other.putNoClobber(entry.key, entry.value); + pub fn clone(self: Self, allocator: *Allocator) !Self { + // TODO this can be made more efficient by directly allocating + // the memory slices and memcpying the elements. 
+ var other = Self.init(); + try other.initCapacity(allocator, self.entries.len); + for (self.entries.items) |entry| { + other.putAssumeCapacityNoClobber(entry.key, entry.value); } return other; } - fn autoCapacity(self: *Self) !void { - if (self.entries.len == 0) { - return self.ensureCapacityExact(16); - } - // if we get too full (60%), double the capacity - if (self.size * 5 >= self.entries.len * 3) { - return self.ensureCapacityExact(self.entries.len * 2); - } - } + fn removeInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) ?Entry { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + var index = &indexes[index_index]; + if (index.isEmpty()) + return null; + + const entry = &self.entries.items[index.entry_index]; + + const hash_match = if (store_hash) h == entry.hash else true; + if (!hash_match or !eql(key, entry.key)) + continue; - fn initCapacity(hm: *Self, capacity: usize) !void { - hm.entries = try hm.allocator.alloc(Entry, capacity); - hm.size = 0; - hm.max_distance_from_start_index = 0; - for (hm.entries) |*entry| { - entry.used = false; + const removed_entry = self.entries.swapRemove(index.entry_index); + if (self.entries.items.len > 0 and self.entries.items.len != index.entry_index) { + // Because of the swap remove, now we need to update the index that was + // pointing to the last entry and is now pointing to this removed item slot. + self.updateEntryIndex(header, self.entries.items.len, index.entry_index, I, indexes); + } + + // Now we have to shift over the following indexes. + roll_over += 1; + while (roll_over < header.indexes_len) : (roll_over += 1) { + const next_index_index = (start_index + roll_over) % header.indexes_len; + const next_index = &indexes[next_index_index]; + if (next_index.isEmpty() or next_index.distance_from_start_index == 0) { + index.setEmpty(); + return removed_entry; + } + index.* = next_index.*; + index.distance_from_start_index -= 1; + index = next_index; + } + unreachable; } + return null; } - fn incrementModificationCount(hm: *Self) void { - if (want_modification_safety) { - hm.modification_count +%= 1; + fn updateEntryIndex( + self: *Self, + header: *IndexHeader, + old_entry_index: usize, + new_entry_index: usize, + comptime I: type, + indexes: []Index(I), + ) void { + const h = if (store_hash) self.entries.items[new_entry_index].hash else hash(self.entries.items[new_entry_index].key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + const index = &indexes[index_index]; + if (index.entry_index == old_entry_index) { + index.entry_index = @intCast(I, new_entry_index); + return; + } } + unreachable; } - const InternalPutResult = struct { - new_entry: *Entry, - old_kv: ?KV, - }; - - /// Returns a pointer to the new entry. - /// Asserts that there is enough space for the new item. - fn internalPut(self: *Self, orig_key: K) InternalPutResult { - var key = orig_key; - var value: V = undefined; - const start_index = self.keyToIndex(key); + /// Must ensureCapacity before calling this. 
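From the caller's side, the reserve-then-insert discipline that this requirement implies looks like the following. A minimal sketch, assuming the managed `AutoHashMap` wrapper exercised by the tests further down; the function name and values are illustrative only:

const std = @import("std");

fn reserveThenInsert(allocator: *std.mem.Allocator) !void {
    var map = std.AutoHashMap(i32, i32).init(allocator);
    defer map.deinit();

    // Reserve once up front; afterwards the asserting variants cannot fail.
    try map.ensureCapacity(20);
    var i: i32 = 0;
    while (i < 20) : (i += 1) {
        map.putAssumeCapacity(i, i + 10);
    }
}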
+ fn getOrPutInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) GetOrPutResult { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); var roll_over: usize = 0; var distance_from_start_index: usize = 0; - var got_result_entry = false; - var result = InternalPutResult{ - .new_entry = undefined, - .old_kv = null, - }; - while (roll_over < self.entries.len) : ({ + while (roll_over <= header.indexes_len) : ({ roll_over += 1; distance_from_start_index += 1; }) { - const index = self.constrainIndex(start_index + roll_over); - const entry = &self.entries[index]; - - if (entry.used and !eql(entry.kv.key, key)) { - if (entry.distance_from_start_index < distance_from_start_index) { - // robin hood to the rescue - const tmp = entry.*; - self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index); - if (!got_result_entry) { - got_result_entry = true; - result.new_entry = entry; + const index_index = (start_index + roll_over) % header.indexes_len; + const index = indexes[index_index]; + if (index.isEmpty()) { + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, self.entries.items.len), + }; + header.maybeBumpMax(distance_from_start_index); + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + return .{ + .found_existing = false, + .entry = new_entry, + }; + } + + // This pointer survives the following append because we call + // entries.ensureCapacity before getOrPutInternal. + const entry = &self.entries.items[index.entry_index]; + const hash_match = if (store_hash) h == entry.hash else true; + if (hash_match and eql(key, entry.key)) { + return .{ + .found_existing = true, + .entry = entry, + }; + } + if (index.distance_from_start_index < distance_from_start_index) { + // In this case, we did not find the item. We will put a new entry. + // However, we will use this index for the new entry, and move + // the previous index down the line, to keep the max_distance_from_start_index + // as small as possible. + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, self.entries.items.len), + }; + header.maybeBumpMax(distance_from_start_index); + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + + distance_from_start_index = index.distance_from_start_index; + var prev_entry_index = index.entry_index; + + // Find somewhere to put the index we replaced by shifting + // following indexes backwards. 
+ roll_over += 1; + distance_from_start_index += 1; + while (roll_over < header.indexes_len) : ({ + roll_over += 1; + distance_from_start_index += 1; + }) { + const next_index_index = (start_index + roll_over) % header.indexes_len; + const next_index = indexes[next_index_index]; + if (next_index.isEmpty()) { + header.maybeBumpMax(distance_from_start_index); + indexes[next_index_index] = .{ + .entry_index = prev_entry_index, + .distance_from_start_index = @intCast(I, distance_from_start_index), + }; + return .{ + .found_existing = false, + .entry = new_entry, + }; + } + if (next_index.distance_from_start_index < distance_from_start_index) { + header.maybeBumpMax(distance_from_start_index); + indexes[next_index_index] = .{ + .entry_index = prev_entry_index, + .distance_from_start_index = @intCast(I, distance_from_start_index), + }; + distance_from_start_index = next_index.distance_from_start_index; + prev_entry_index = next_index.entry_index; } - entry.* = Entry{ - .used = true, - .distance_from_start_index = distance_from_start_index, - .kv = KV{ - .key = key, - .value = value, - }, - }; - key = tmp.kv.key; - value = tmp.kv.value; - distance_from_start_index = tmp.distance_from_start_index; } - continue; + unreachable; } + } + unreachable; + } - if (entry.used) { - result.old_kv = entry.kv; - } else { - // adding an entry. otherwise overwriting old value with - // same key - self.size += 1; - } + fn getInternal(self: Self, key: K, header: *IndexHeader, comptime I: type) ?*Entry { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + const index = indexes[index_index]; + if (index.isEmpty()) + return null; + + const entry = &self.entries.items[index.entry_index]; + const hash_match = if (store_hash) h == entry.hash else true; + if (hash_match and eql(key, entry.key)) + return entry; + } + return null; + } - self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index); - if (!got_result_entry) { - result.new_entry = entry; - } - entry.* = Entry{ - .used = true, - .distance_from_start_index = distance_from_start_index, - .kv = KV{ - .key = key, - .value = value, - }, - }; - return result; + fn insertAllEntriesIntoNewHeader(self: *Self, header: *IndexHeader) void { + switch (header.capacityIndexType()) { + .u8 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u8), + .u16 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u16), + .u32 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u32), + .usize => return self.insertAllEntriesIntoNewHeaderGeneric(header, usize), } - unreachable; // put into a full map } - fn internalGet(hm: Self, key: K) ?*KV { - const start_index = hm.keyToIndex(key); - { + fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, header: *IndexHeader, comptime I: type) void { + const indexes = header.indexes(I); + entry_loop: for (self.entries.items) |entry, i| { + const h = if (store_hash) entry.hash else hash(entry.key); + const start_index = header.hashToIndex(h); + var entry_index = i; var roll_over: usize = 0; - while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) { - const index = hm.constrainIndex(start_index + roll_over); - const entry = &hm.entries[index]; - - if (!entry.used) return null; - if (eql(entry.kv.key, key)) return &entry.kv; + var 
distance_from_start_index: usize = 0; + while (roll_over < header.indexes_len) : ({ + roll_over += 1; + distance_from_start_index += 1; + }) { + const index_index = (start_index + roll_over) % header.indexes_len; + const next_index = indexes[index_index]; + if (next_index.isEmpty()) { + header.maybeBumpMax(distance_from_start_index); + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, entry_index), + }; + continue :entry_loop; + } + if (next_index.distance_from_start_index < distance_from_start_index) { + header.maybeBumpMax(distance_from_start_index); + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, entry_index), + }; + distance_from_start_index = next_index.distance_from_start_index; + entry_index = next_index.entry_index; + } } + unreachable; } - return null; } + }; +} + +const CapacityIndexType = enum { u8, u16, u32, usize }; - fn keyToIndex(hm: Self, key: K) usize { - return hm.constrainIndex(@as(usize, hash(key))); +fn capacityIndexType(indexes_len: usize) CapacityIndexType { + if (indexes_len < math.maxInt(u8)) + return .u8; + if (indexes_len < math.maxInt(u16)) + return .u16; + if (indexes_len < math.maxInt(u32)) + return .u32; + return .usize; +} + +fn capacityIndexSize(indexes_len: usize) usize { + switch (capacityIndexType(indexes_len)) { + .u8 => return @sizeOf(Index(u8)), + .u16 => return @sizeOf(Index(u16)), + .u32 => return @sizeOf(Index(u32)), + .usize => return @sizeOf(Index(usize)), + } +} + +fn Index(comptime I: type) type { + return extern struct { + entry_index: I, + distance_from_start_index: I, + + const Self = @This(); + + fn isEmpty(idx: Self) bool { + return idx.entry_index == math.maxInt(I); } - fn constrainIndex(hm: Self, i: usize) usize { - // this is an optimization for modulo of power of two integers; - // it requires hm.entries.len to always be a power of two - return i & (hm.entries.len - 1); + fn setEmpty(idx: *Self) void { + idx.entry_index = math.maxInt(I); } }; } +/// This struct is trailed by an array of `Index(I)`, where `I` +/// and the array length are determined by `indexes_len`. 
+const IndexHeader = struct { + max_distance_from_start_index: usize, + indexes_len: usize, + + fn hashToIndex(header: IndexHeader, h: u32) usize { + return @as(usize, h) % header.indexes_len; + } + + fn indexes(header: *IndexHeader, comptime I: type) []Index(I) { + const start = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader)); + return start[0..header.indexes_len]; + } + + fn capacityIndexType(header: IndexHeader) CapacityIndexType { + return hash_map.capacityIndexType(header.indexes_len); + } + + fn maybeBumpMax(header: *IndexHeader, distance_from_start_index: usize) void { + if (distance_from_start_index > header.max_distance_from_start_index) { + header.max_distance_from_start_index = distance_from_start_index; + } + } + + fn alloc(allocator: *Allocator, len: usize) !*IndexHeader { + const index_size = hash_map.capacityIndexSize(len); + const nbytes = @sizeOf(IndexHeader) + index_size * len; + const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact); + @memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader)); + const result = @ptrCast(*IndexHeader, bytes.ptr); + result.* = .{ + .max_distance_from_start_index = 0, + .indexes_len = len, + }; + return result; + } + + fn free(header: *IndexHeader, allocator: *Allocator) void { + const index_size = hash_map.capacityIndexSize(header.indexes_len); + const ptr = @ptrCast([*]u8, header); + const slice = ptr[0 .. @sizeOf(IndexHeader) + header.indexes_len * index_size]; + allocator.free(slice); + } +}; + test "basic hash map usage" { var map = AutoHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); - testing.expect((try map.put(1, 11)) == null); - testing.expect((try map.put(2, 22)) == null); - testing.expect((try map.put(3, 33)) == null); - testing.expect((try map.put(4, 44)) == null); + testing.expect((try map.fetchPut(1, 11)) == null); + testing.expect((try map.fetchPut(2, 22)) == null); + testing.expect((try map.fetchPut(3, 33)) == null); + testing.expect((try map.fetchPut(4, 44)) == null); try map.putNoClobber(5, 55); - testing.expect((try map.put(5, 66)).?.value == 55); - testing.expect((try map.put(5, 55)).?.value == 66); + testing.expect((try map.fetchPut(5, 66)).?.value == 55); + testing.expect((try map.fetchPut(5, 55)).?.value == 66); const gop1 = try map.getOrPut(5); testing.expect(gop1.found_existing == true); - testing.expect(gop1.kv.value == 55); - gop1.kv.value = 77; - testing.expect(map.get(5).?.value == 77); + testing.expect(gop1.entry.value == 55); + gop1.entry.value = 77; + testing.expect(map.getEntry(5).?.value == 77); const gop2 = try map.getOrPut(99); testing.expect(gop2.found_existing == false); - gop2.kv.value = 42; - testing.expect(map.get(99).?.value == 42); + gop2.entry.value = 42; + testing.expect(map.getEntry(99).?.value == 42); const gop3 = try map.getOrPutValue(5, 5); testing.expect(gop3.value == 77); @@ -454,15 +876,15 @@ test "basic hash map usage" { testing.expect(gop4.value == 41); testing.expect(map.contains(2)); - testing.expect(map.get(2).?.value == 22); - testing.expect(map.getValue(2).? == 22); + testing.expect(map.getEntry(2).?.value == 22); + testing.expect(map.get(2).? 
== 22); const rmv1 = map.remove(2); testing.expect(rmv1.?.key == 2); testing.expect(rmv1.?.value == 22); testing.expect(map.remove(2) == null); + testing.expect(map.getEntry(2) == null); testing.expect(map.get(2) == null); - testing.expect(map.getValue(2) == null); map.removeAssertDiscard(3); } @@ -498,8 +920,8 @@ test "iterator hash map" { it.reset(); var count: usize = 0; - while (it.next()) |kv| : (count += 1) { - buffer[@intCast(usize, kv.key)] = kv.value; + while (it.next()) |entry| : (count += 1) { + buffer[@intCast(usize, entry.key)] = entry.value; } testing.expect(count == 3); testing.expect(it.next() == null); @@ -510,8 +932,8 @@ test "iterator hash map" { it.reset(); count = 0; - while (it.next()) |kv| { - buffer[@intCast(usize, kv.key)] = kv.value; + while (it.next()) |entry| { + buffer[@intCast(usize, entry.key)] = entry.value; count += 1; if (count >= 2) break; } @@ -531,14 +953,14 @@ test "ensure capacity" { defer map.deinit(); try map.ensureCapacity(20); - const initialCapacity = map.entries.len; - testing.expect(initialCapacity >= 20); + const initial_capacity = map.capacity(); + testing.expect(initial_capacity >= 20); var i: i32 = 0; while (i < 20) : (i += 1) { - testing.expect(map.putAssumeCapacity(i, i + 10) == null); + testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null); } // shouldn't resize from putAssumeCapacity - testing.expect(initialCapacity == map.entries.len); + testing.expect(initial_capacity == map.capacity()); } pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) { @@ -575,6 +997,24 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { }.eql; } +pub fn autoEqlIsCheap(comptime K: type) bool { + return switch (@typeInfo(K)) { + .Bool, + .Int, + .Float, + .Pointer, + .ComptimeFloat, + .ComptimeInt, + .Enum, + .Fn, + .ErrorSet, + .AnyFrame, + .EnumLiteral, + => true, + else => false, + }; +} + pub fn getAutoHashStratFn(comptime K: type, comptime strategy: std.hash.Strategy) (fn (K) u32) { return struct { fn hash(key: K) u32 { -- cgit v1.2.3 From a2fd8f72c155d9c3ffeb3f625598fa81ed9629dc Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 6 Jul 2020 06:09:47 +0000 Subject: std: add new array list functions --- lib/std/array_list.zig | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'lib/std/array_list.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index a68c1fa9d6..d667bc4d17 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -257,6 +257,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return &self.items[self.items.len - 1]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is an array pointing to the newly allocated elements. + pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]T { + const prev_len = self.items.len; + try self.resize(self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is an array pointing to the newly allocated elements. + /// Asserts that there is already space for the new item without allocating more. + pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. 
pub fn pop(self: *Self) T { @@ -488,6 +506,24 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ return &self.items[self.items.len - 1]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is an array pointing to the newly allocated elements. + pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T { + const prev_len = self.items.len; + try self.resize(allocator, self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is an array pointing to the newly allocated elements. + /// Asserts that there is already space for the new item without allocating more. + pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// This operation does not invalidate any element pointers. @@ -727,3 +763,27 @@ test "std.ArrayList.writer" { try writer.writeAll("efg"); testing.expectEqualSlices(u8, list.items, "abcdefg"); } + +test "addManyAsArray" { + const a = std.testing.allocator; + { + var list = ArrayList(u8).init(a); + defer list.deinit(); + + (try list.addManyAsArray(4)).* = "aoeu".*; + try list.ensureCapacity(8); + list.addManyAsArrayAssumeCapacity(4).* = "asdf".*; + + testing.expectEqualSlices(u8, list.items, "aoeuasdf"); + } + { + var list = ArrayListUnmanaged(u8){}; + defer list.deinit(a); + + (try list.addManyAsArray(a, 4)).* = "aoeu".*; + try list.ensureCapacity(a, 8); + list.addManyAsArrayAssumeCapacity(4).* = "asdf".*; + + testing.expectEqualSlices(u8, list.items, "aoeuasdf"); + } +} -- cgit v1.2.3 From e85fe13e44b1e2957b9d90e19c171fdfa8cb5505 Mon Sep 17 00:00:00 2001 From: Vexu Date: Sat, 11 Jul 2020 14:09:04 +0300 Subject: run zig fmt on std lib and self hosted --- build.zig | 8 +- lib/std/array_list.zig | 2 +- lib/std/array_list_sentineled.zig | 4 +- lib/std/atomic/queue.zig | 4 +- lib/std/build.zig | 4 +- lib/std/build/emit_raw.zig | 2 +- lib/std/builtin.zig | 10 +- lib/std/c.zig | 2 +- lib/std/c/ast.zig | 14 +-- lib/std/cache_hash.zig | 2 +- lib/std/comptime_string_map.zig | 6 +- lib/std/crypto/benchmark.zig | 12 +-- lib/std/crypto/test.zig | 2 +- lib/std/debug.zig | 24 ++--- lib/std/debug/leb128.zig | 14 +-- lib/std/dwarf.zig | 18 ++-- lib/std/elf.zig | 4 +- lib/std/event/group.zig | 2 +- lib/std/fmt.zig | 70 ++++++------- lib/std/fs/wasi.zig | 2 +- lib/std/hash/auto_hash.zig | 16 +-- lib/std/hash/benchmark.zig | 4 +- lib/std/hash/cityhash.zig | 2 +- lib/std/hash/murmur.zig | 2 +- lib/std/heap.zig | 31 +++--- lib/std/heap/logging_allocator.zig | 4 +- lib/std/http/headers.zig | 2 +- lib/std/io/bit_reader.zig | 2 +- lib/std/io/bit_writer.zig | 4 +- lib/std/io/buffered_reader.zig | 2 +- lib/std/io/buffered_writer.zig | 2 +- lib/std/io/counting_writer.zig | 2 +- lib/std/io/fixed_buffer_stream.zig | 2 +- lib/std/io/multi_writer.zig | 2 +- lib/std/io/peek_stream.zig | 2 +- lib/std/io/serialization.zig | 14 +-- lib/std/io/writer.zig | 2 +- lib/std/json.zig | 16 +-- lib/std/json/write_stream.zig | 6 +- lib/std/log.zig | 18 ++-- lib/std/math.zig | 36 +++---- lib/std/math/acos.zig | 2 +- lib/std/math/acosh.zig | 2 +- lib/std/math/asin.zig | 2 +- lib/std/math/asinh.zig | 2 +- lib/std/math/atan.zig | 2 +- 
lib/std/math/atanh.zig | 2 +- lib/std/math/big/int.zig | 20 ++-- lib/std/math/big/rational.zig | 4 +- lib/std/math/cbrt.zig | 2 +- lib/std/math/ceil.zig | 2 +- lib/std/math/complex/abs.zig | 2 +- lib/std/math/complex/acos.zig | 2 +- lib/std/math/complex/acosh.zig | 2 +- lib/std/math/complex/arg.zig | 2 +- lib/std/math/complex/asin.zig | 2 +- lib/std/math/complex/asinh.zig | 2 +- lib/std/math/complex/atan.zig | 2 +- lib/std/math/complex/atanh.zig | 2 +- lib/std/math/complex/conj.zig | 2 +- lib/std/math/complex/cos.zig | 2 +- lib/std/math/complex/cosh.zig | 2 +- lib/std/math/complex/exp.zig | 2 +- lib/std/math/complex/ldexp.zig | 2 +- lib/std/math/complex/log.zig | 2 +- lib/std/math/complex/proj.zig | 2 +- lib/std/math/complex/sin.zig | 2 +- lib/std/math/complex/sinh.zig | 2 +- lib/std/math/complex/sqrt.zig | 2 +- lib/std/math/complex/tan.zig | 2 +- lib/std/math/complex/tanh.zig | 2 +- lib/std/math/cos.zig | 2 +- lib/std/math/cosh.zig | 2 +- lib/std/math/exp.zig | 2 +- lib/std/math/exp2.zig | 2 +- lib/std/math/expm1.zig | 2 +- lib/std/math/expo2.zig | 2 +- lib/std/math/fabs.zig | 2 +- lib/std/math/floor.zig | 2 +- lib/std/math/frexp.zig | 2 +- lib/std/math/ilogb.zig | 2 +- lib/std/math/isfinite.zig | 2 +- lib/std/math/isinf.zig | 6 +- lib/std/math/isnan.zig | 4 +- lib/std/math/isnormal.zig | 2 +- lib/std/math/ln.zig | 2 +- lib/std/math/log10.zig | 2 +- lib/std/math/log1p.zig | 2 +- lib/std/math/log2.zig | 2 +- lib/std/math/modf.zig | 2 +- lib/std/math/round.zig | 2 +- lib/std/math/scalbn.zig | 2 +- lib/std/math/signbit.zig | 2 +- lib/std/math/sin.zig | 2 +- lib/std/math/sinh.zig | 2 +- lib/std/math/sqrt.zig | 2 +- lib/std/math/tan.zig | 2 +- lib/std/math/tanh.zig | 2 +- lib/std/math/trunc.zig | 2 +- lib/std/mem.zig | 173 +++++++++++++++++---------------- lib/std/meta.zig | 10 +- lib/std/meta/trait.zig | 8 +- lib/std/net.zig | 6 +- lib/std/os.zig | 2 +- lib/std/os/uefi.zig | 2 +- lib/std/progress.zig | 4 +- lib/std/segmented_list.zig | 4 +- lib/std/sort.zig | 38 ++++---- lib/std/special/build_runner.zig | 4 +- lib/std/special/test_runner.zig | 4 +- lib/std/target.zig | 19 ++-- lib/std/testing.zig | 4 +- lib/std/thread.zig | 2 +- lib/std/zig/ast.zig | 16 +-- lib/std/zig/parse.zig | 2 +- lib/std/zig/string_literal.zig | 2 +- lib/std/zig/system.zig | 8 +- src-self-hosted/Module.zig | 10 +- src-self-hosted/codegen.zig | 21 ++-- src-self-hosted/dep_tokenizer.zig | 26 ++--- src-self-hosted/ir.zig | 2 +- src-self-hosted/libc_installation.zig | 4 +- src-self-hosted/link.zig | 8 +- src-self-hosted/liveness.zig | 2 +- src-self-hosted/main.zig | 2 +- src-self-hosted/print_targets.zig | 2 +- src-self-hosted/translate_c.zig | 18 ++-- src-self-hosted/type.zig | 3 +- src-self-hosted/value.zig | 2 +- src-self-hosted/zir.zig | 13 ++- test/stage1/behavior/async_fn.zig | 10 +- test/stage1/behavior/bitcast.zig | 4 +- test/stage1/behavior/bugs/2114.zig | 2 +- test/stage1/behavior/bugs/3742.zig | 2 +- test/stage1/behavior/bugs/4328.zig | 8 +- test/stage1/behavior/bugs/4769_a.zig | 2 +- test/stage1/behavior/bugs/4769_b.zig | 2 +- test/stage1/behavior/byval_arg_var.zig | 4 +- test/stage1/behavior/call.zig | 2 +- test/stage1/behavior/enum.zig | 2 +- test/stage1/behavior/error.zig | 2 +- test/stage1/behavior/eval.zig | 9 +- test/stage1/behavior/fn.zig | 6 +- test/stage1/behavior/generics.zig | 8 +- test/stage1/behavior/optional.zig | 16 ++- test/stage1/behavior/struct.zig | 10 +- test/stage1/behavior/tuple.zig | 4 +- test/stage1/behavior/type_info.zig | 2 +- test/stage1/behavior/union.zig | 2 +- 
test/stage1/behavior/var_args.zig | 16 +-- test/stage1/behavior/vector.zig | 8 +- 151 files changed, 534 insertions(+), 527 deletions(-) (limited to 'lib/std/array_list.zig') diff --git a/build.zig b/build.zig index f9cd1b0dea..e4ed04ea16 100644 --- a/build.zig +++ b/build.zig @@ -153,7 +153,7 @@ pub fn build(b: *Builder) !void { test_step.dependOn(docs_step); } -fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void { +fn dependOnLib(b: *Builder, lib_exe_obj: anytype, dep: LibraryDep) void { for (dep.libdirs.items) |lib_dir| { lib_exe_obj.addLibPath(lib_dir); } @@ -193,7 +193,7 @@ fn fileExists(filename: []const u8) !bool { return true; } -fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void { +fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void { lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{ cmake_binary_dir, "zig_cpp", @@ -275,7 +275,7 @@ fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep { return result; } -fn configureStage2(b: *Builder, exe: var, ctx: Context) !void { +fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void { exe.addIncludeDir("src"); exe.addIncludeDir(ctx.cmake_binary_dir); addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp"); @@ -340,7 +340,7 @@ fn configureStage2(b: *Builder, exe: var, ctx: Context) !void { fn addCxxKnownPath( b: *Builder, ctx: Context, - exe: var, + exe: anytype, objname: []const u8, errtxt: ?[]const u8, ) !void { diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index d667bc4d17..4d8cdc200c 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -53,7 +53,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Deprecated: use `items` field directly. /// Return contents as a slice. Only valid while the list /// doesn't change size. - pub fn span(self: var) @TypeOf(self.items) { + pub fn span(self: anytype) @TypeOf(self.items) { return self.items; } diff --git a/lib/std/array_list_sentineled.zig b/lib/std/array_list_sentineled.zig index b83cc4ad62..828be7462f 100644 --- a/lib/std/array_list_sentineled.zig +++ b/lib/std/array_list_sentineled.zig @@ -69,7 +69,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type { } /// Only works when `T` is `u8`. - pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: var) !Self { + pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: anytype) !Self { const size = std.math.cast(usize, std.fmt.count(format, args)) catch |err| switch (err) { error.Overflow => return error.OutOfMemory, }; @@ -82,7 +82,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type { self.list.deinit(); } - pub fn span(self: var) @TypeOf(self.list.items[0..:sentinel]) { + pub fn span(self: anytype) @TypeOf(self.list.items[0..:sentinel]) { return self.list.items[0..self.len() :sentinel]; } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index d6d0b70754..880af37ef4 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -123,10 +123,10 @@ pub fn Queue(comptime T: type) type { /// Dumps the contents of the queue to `stream`. /// Up to 4 elements from the head are dumped and the tail of the queue is /// dumped as well. 
- pub fn dumpToStream(self: *Self, stream: var) !void { + pub fn dumpToStream(self: *Self, stream: anytype) !void { const S = struct { fn dumpRecursive( - s: var, + s: anytype, optional_node: ?*Node, indent: usize, comptime depth: comptime_int, diff --git a/lib/std/build.zig b/lib/std/build.zig index 2d5ec4bd91..19de76b00d 100644 --- a/lib/std/build.zig +++ b/lib/std/build.zig @@ -312,7 +312,7 @@ pub const Builder = struct { return write_file_step; } - pub fn addLog(self: *Builder, comptime format: []const u8, args: var) *LogStep { + pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep { const data = self.fmt(format, args); const log_step = self.allocator.create(LogStep) catch unreachable; log_step.* = LogStep.init(self, data); @@ -883,7 +883,7 @@ pub const Builder = struct { return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable; } - pub fn fmt(self: *Builder, comptime format: []const u8, args: var) []u8 { + pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 { return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable; } diff --git a/lib/std/build/emit_raw.zig b/lib/std/build/emit_raw.zig index 746b0ac91b..058a4a64ff 100644 --- a/lib/std/build/emit_raw.zig +++ b/lib/std/build/emit_raw.zig @@ -126,7 +126,7 @@ const BinaryElfOutput = struct { return segment.p_offset <= section.elfOffset and (segment.p_offset + segment.p_filesz) >= (section.elfOffset + section.fileSize); } - fn sectionValidForOutput(shdr: var) bool { + fn sectionValidForOutput(shdr: anytype) bool { return shdr.sh_size > 0 and shdr.sh_type != elf.SHT_NOBITS and ((shdr.sh_flags & elf.SHF_ALLOC) == elf.SHF_ALLOC); } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 0c7e534bed..5eafc4e409 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -198,7 +198,7 @@ pub const TypeInfo = union(enum) { /// The type of the sentinel is the element type of the pointer, which is /// the value of the `child` field in this struct. However there is no way /// to refer to that type here, so we use `var`. - sentinel: var, + sentinel: anytype, /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. @@ -220,7 +220,7 @@ pub const TypeInfo = union(enum) { /// The type of the sentinel is the element type of the array, which is /// the value of the `child` field in this struct. However there is no way /// to refer to that type here, so we use `var`. - sentinel: var, + sentinel: anytype, }; /// This data structure is used by the Zig language code generation and @@ -237,7 +237,7 @@ pub const TypeInfo = union(enum) { name: []const u8, offset: ?comptime_int, field_type: type, - default_value: var, + default_value: anytype, }; /// This data structure is used by the Zig language code generation and @@ -328,7 +328,7 @@ pub const TypeInfo = union(enum) { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. 
pub const Frame = struct { - function: var, + function: anytype, }; /// This data structure is used by the Zig language code generation and @@ -452,7 +452,7 @@ pub const Version = struct { self: Version, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { if (fmt.len == 0) { if (self.patch == 0) { diff --git a/lib/std/c.zig b/lib/std/c.zig index 28d4157d6a..e483b5b50f 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -27,7 +27,7 @@ pub usingnamespace switch (std.Target.current.os.tag) { else => struct {}, }; -pub fn getErrno(rc: var) u16 { +pub fn getErrno(rc: anytype) u16 { if (rc == -1) { return @intCast(u16, _errno().*); } else { diff --git a/lib/std/c/ast.zig b/lib/std/c/ast.zig index bb8c01f138..467050d57d 100644 --- a/lib/std/c/ast.zig +++ b/lib/std/c/ast.zig @@ -64,7 +64,7 @@ pub const Error = union(enum) { NothingDeclared: SimpleError("declaration doesn't declare anything"), QualifierIgnored: SingleTokenError("qualifier '{}' ignored"), - pub fn render(self: *const Error, tree: *Tree, stream: var) !void { + pub fn render(self: *const Error, tree: *Tree, stream: anytype) !void { switch (self.*) { .InvalidToken => |*x| return x.render(tree, stream), .ExpectedToken => |*x| return x.render(tree, stream), @@ -114,7 +114,7 @@ pub const Error = union(enum) { token: TokenIndex, expected_id: @TagType(Token.Id), - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { const found_token = tree.tokens.at(self.token); if (found_token.id == .Invalid) { return stream.print("expected '{}', found invalid bytes", .{self.expected_id.symbol()}); @@ -129,7 +129,7 @@ pub const Error = union(enum) { token: TokenIndex, type_spec: *Node.TypeSpec, - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { try stream.write("invalid type specifier '"); try type_spec.spec.print(tree, stream); const token_name = tree.tokens.at(self.token).id.symbol(); @@ -141,7 +141,7 @@ pub const Error = union(enum) { kw: TokenIndex, name: TokenIndex, - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { return stream.print("must use '{}' tag to refer to type '{}'", .{ tree.slice(kw), tree.slice(name) }); } }; @@ -150,7 +150,7 @@ pub const Error = union(enum) { return struct { token: TokenIndex, - pub fn render(self: *const @This(), tree: *Tree, stream: var) !void { + pub fn render(self: *const @This(), tree: *Tree, stream: anytype) !void { const actual_token = tree.tokens.at(self.token); return stream.print(msg, .{actual_token.id.symbol()}); } @@ -163,7 +163,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void { + pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: anytype) !void { return stream.write(msg); } }; @@ -317,7 +317,7 @@ pub const Node = struct { sym_type: *Type, }, - pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: var) !void { + pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: anytype) !void { switch (self.spec) { .None => unreachable, .Void => |index| try stream.write(tree.slice(index)), diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig index 257f407826..acaa5edc8d 100644 --- 
a/lib/std/cache_hash.zig +++ b/lib/std/cache_hash.zig @@ -70,7 +70,7 @@ pub const CacheHash = struct { /// Convert the input value into bytes and record it as a dependency of the /// process being cached - pub fn add(self: *CacheHash, val: var) void { + pub fn add(self: *CacheHash, val: anytype) void { assert(self.manifest_file == null); const valPtr = switch (@typeInfo(@TypeOf(val))) { diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig index 3021f6bc1e..8cc5cac130 100644 --- a/lib/std/comptime_string_map.zig +++ b/lib/std/comptime_string_map.zig @@ -8,7 +8,7 @@ const mem = std.mem; /// `kvs` expects a list literal containing list literals or an array/slice of structs /// where `.@"0"` is the `[]const u8` key and `.@"1"` is the associated value of type `V`. /// TODO: https://github.com/ziglang/zig/issues/4335 -pub fn ComptimeStringMap(comptime V: type, comptime kvs: var) type { +pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type { const precomputed = comptime blk: { @setEvalBranchQuota(2000); const KV = struct { @@ -126,7 +126,7 @@ test "ComptimeStringMap slice of structs" { testMap(map); } -fn testMap(comptime map: var) void { +fn testMap(comptime map: anytype) void { std.testing.expectEqual(TestEnum.A, map.get("have").?); std.testing.expectEqual(TestEnum.B, map.get("nothing").?); std.testing.expect(null == map.get("missing")); @@ -165,7 +165,7 @@ test "ComptimeStringMap void value type, list literal of list literals" { testSet(map); } -fn testSet(comptime map: var) void { +fn testSet(comptime map: anytype) void { std.testing.expectEqual({}, map.get("have").?); std.testing.expectEqual({}, map.get("nothing").?); std.testing.expect(null == map.get("missing")); diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index 8c5d75f80a..f0f40bd231 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -29,7 +29,7 @@ const hashes = [_]Crypto{ Crypto{ .ty = crypto.Blake3, .name = "blake3" }, }; -pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 { +pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 { var h = Hash.init(); var block: [Hash.digest_length]u8 = undefined; @@ -56,7 +56,7 @@ const macs = [_]Crypto{ Crypto{ .ty = crypto.HmacSha256, .name = "hmac-sha256" }, }; -pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 { +pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 { std.debug.assert(32 >= Mac.mac_length and 32 >= Mac.minimum_key_length); var in: [1 * MiB]u8 = undefined; @@ -81,7 +81,7 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 { const exchanges = [_]Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }}; -pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count: comptime_int) !u64 { +pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_count: comptime_int) !u64 { std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length); var in: [DhKeyExchange.minimum_key_length]u8 = undefined; @@ -166,21 +166,21 @@ pub fn main() !void { inline for (hashes) |H| { if (filter == null or std.mem.indexOf(u8, H.name, filter.?) 
!= null) { const throughput = try benchmarkHash(H.ty, mode(32 * MiB)); - try stdout.print("{:>11}: {:5} MiB/s\n", .{H.name, throughput / (1 * MiB)}); + try stdout.print("{:>11}: {:5} MiB/s\n", .{ H.name, throughput / (1 * MiB) }); } } inline for (macs) |M| { if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) { const throughput = try benchmarkMac(M.ty, mode(128 * MiB)); - try stdout.print("{:>11}: {:5} MiB/s\n", .{M.name, throughput / (1 * MiB)}); + try stdout.print("{:>11}: {:5} MiB/s\n", .{ M.name, throughput / (1 * MiB) }); } } inline for (exchanges) |E| { if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { const throughput = try benchmarkKeyExchange(E.ty, mode(1000)); - try stdout.print("{:>11}: {:5} exchanges/s\n", .{E.name, throughput}); + try stdout.print("{:>11}: {:5} exchanges/s\n", .{ E.name, throughput }); } } } diff --git a/lib/std/crypto/test.zig b/lib/std/crypto/test.zig index 1ff326cf39..61260c7e39 100644 --- a/lib/std/crypto/test.zig +++ b/lib/std/crypto/test.zig @@ -4,7 +4,7 @@ const mem = std.mem; const fmt = std.fmt; // Hash using the specified hasher `H` asserting `expected == H(input)`. -pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, input: []const u8) void { +pub fn assertEqualHash(comptime Hasher: anytype, comptime expected: []const u8, input: []const u8) void { var h: [expected.len / 2]u8 = undefined; Hasher.hash(input, h[0..]); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index e6d0c17da4..3346598ab7 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -58,7 +58,7 @@ pub const warn = print; /// Print to stderr, unbuffered, and silently returning on failure. Intended /// for use in "printf debugging." Use `std.log` functions for proper logging. -pub fn print(comptime fmt: []const u8, args: var) void { +pub fn print(comptime fmt: []const u8, args: anytype) void { const held = stderr_mutex.acquire(); defer held.release(); const stderr = io.getStdErr().writer(); @@ -223,7 +223,7 @@ pub fn assert(ok: bool) void { if (!ok) unreachable; // assertion failure } -pub fn panic(comptime format: []const u8, args: var) noreturn { +pub fn panic(comptime format: []const u8, args: anytype) noreturn { @setCold(true); // TODO: remove conditional once wasi / LLVM defines __builtin_return_address const first_trace_addr = if (builtin.os.tag == .wasi) null else @returnAddress(); @@ -241,7 +241,7 @@ var panic_mutex = std.Mutex.init(); /// This is used to catch and handle panics triggered by the panic handler. 
threadlocal var panic_stage: usize = 0; -pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: var) noreturn { +pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn { @setCold(true); if (enable_segfault_handler) { @@ -306,7 +306,7 @@ const RESET = "\x1b[0m"; pub fn writeStackTrace( stack_trace: builtin.StackTrace, - out_stream: var, + out_stream: anytype, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_config: TTY.Config, @@ -384,7 +384,7 @@ pub const StackIterator = struct { }; pub fn writeCurrentStackTrace( - out_stream: var, + out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, @@ -399,7 +399,7 @@ pub fn writeCurrentStackTrace( } pub fn writeCurrentStackTraceWindows( - out_stream: var, + out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, @@ -435,7 +435,7 @@ pub const TTY = struct { // TODO give this a payload of file handle windows_api, - fn setColor(conf: Config, out_stream: var, color: Color) void { + fn setColor(conf: Config, out_stream: anytype, color: Color) void { nosuspend switch (conf) { .no_color => return, .escape_codes => switch (color) { @@ -555,7 +555,7 @@ fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const Mach } /// TODO resources https://github.com/ziglang/zig/issues/4353 -pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_config: TTY.Config) !void { +pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void { const module = debug_info.getModuleForAddress(address) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => { return printLineInfo( @@ -586,13 +586,13 @@ pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: us } fn printLineInfo( - out_stream: var, + out_stream: anytype, line_info: ?LineInfo, address: usize, symbol_name: []const u8, compile_unit_name: []const u8, tty_config: TTY.Config, - comptime printLineFromFile: var, + comptime printLineFromFile: anytype, ) !void { nosuspend { tty_config.setColor(out_stream, .White); @@ -820,7 +820,7 @@ fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInf } } -fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize { +fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]usize { const num_words = try stream.readIntLittle(u32); var word_i: usize = 0; var list = ArrayList(usize).init(allocator); @@ -1004,7 +1004,7 @@ fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugI }; } -fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void { +fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void { // Need this to always block even in async I/O mode, because this could potentially // be called from e.g. the event loop code crashing. var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking }); diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig index 16a23da123..8149554246 100644 --- a/lib/std/debug/leb128.zig +++ b/lib/std/debug/leb128.zig @@ -3,7 +3,7 @@ const testing = std.testing; /// Read a single unsigned LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. 
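A minimal sketch of the reader-based decoding described just below; the import path is an assumption for illustration, and the three bytes are the canonical LEB128 encoding of 624485:

const std = @import("std");
const leb = std.debug.leb128; // import path assumed for this sketch

test "read one ULEB128 value" {
    // 0xE5 0x8E 0x26 decodes as 0x65 + (0x0E << 7) + (0x26 << 14) = 624485.
    var fbs = std.io.fixedBufferStream("\xE5\x8E\x26");
    std.testing.expect((try leb.readULEB128(u64, fbs.reader())) == 624485);
}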
-pub fn readULEB128(comptime T: type, reader: var) !T { +pub fn readULEB128(comptime T: type, reader: anytype) !T { const U = if (T.bit_count < 8) u8 else T; const ShiftT = std.math.Log2Int(U); @@ -33,7 +33,7 @@ pub fn readULEB128(comptime T: type, reader: var) !T { } /// Write a single unsigned integer as unsigned LEB128 to the given writer. -pub fn writeULEB128(writer: var, uint_value: var) !void { +pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); const U = if (T.bit_count < 8) u8 else T; var value = @intCast(U, uint_value); @@ -61,7 +61,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T { /// Write a single unsigned LEB128 integer to the given memory as unsigned LEB128, /// returning the number of bytes written. -pub fn writeULEB128Mem(ptr: []u8, uint_value: var) !usize { +pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize { const T = @TypeOf(uint_value); const max_group = (T.bit_count + 6) / 7; var buf = std.io.fixedBufferStream(ptr); @@ -71,7 +71,7 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: var) !usize { /// Read a single signed LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. -pub fn readILEB128(comptime T: type, reader: var) !T { +pub fn readILEB128(comptime T: type, reader: anytype) !T { const S = if (T.bit_count < 8) i8 else T; const U = std.meta.Int(false, S.bit_count); const ShiftU = std.math.Log2Int(U); @@ -120,7 +120,7 @@ pub fn readILEB128(comptime T: type, reader: var) !T { } /// Write a single signed integer as signed LEB128 to the given writer. -pub fn writeILEB128(writer: var, int_value: var) !void { +pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const T = @TypeOf(int_value); const S = if (T.bit_count < 8) i8 else T; const U = std.meta.Int(false, S.bit_count); @@ -152,7 +152,7 @@ pub fn readILEB128Mem(comptime T: type, ptr: *[]const u8) !T { /// Write a single signed LEB128 integer to the given memory as unsigned LEB128, /// returning the number of bytes written. 
-pub fn writeILEB128Mem(ptr: []u8, int_value: var) !usize { +pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize { const T = @TypeOf(int_value); var buf = std.io.fixedBufferStream(ptr); try writeILEB128(buf.writer(), int_value); @@ -295,7 +295,7 @@ test "deserialize unsigned LEB128" { try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00"); } -fn test_write_leb128(value: var) !void { +fn test_write_leb128(value: anytype) !void { const T = @TypeOf(value); const writeStream = if (T.is_signed) writeILEB128 else writeULEB128; diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 24792c7ca0..1400442247 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -236,7 +236,7 @@ const LineNumberProgram = struct { } }; -fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 { +fn readUnitLength(in_stream: anytype, endian: builtin.Endian, is_64: *bool) !u64 { const first_32_bits = try in_stream.readInt(u32, endian); is_64.* = (first_32_bits == 0xffffffff); if (is_64.*) { @@ -249,7 +249,7 @@ fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 { } // TODO the nosuspends here are workarounds -fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 { +fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 { const buf = try allocator.alloc(u8, size); errdefer allocator.free(buf); if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile; @@ -257,25 +257,25 @@ fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 } // TODO the nosuspends here are workarounds -fn readAddress(in_stream: var, endian: builtin.Endian, is_64: bool) !u64 { +fn readAddress(in_stream: anytype, endian: builtin.Endian, is_64: bool) !u64 { return nosuspend if (is_64) try in_stream.readInt(u64, endian) else @as(u64, try in_stream.readInt(u32, endian)); } -fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue { +fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Block = buf }; } // TODO the nosuspends here are workarounds -fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: usize) !FormValue { +fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: usize) !FormValue { const block_len = try nosuspend in_stream.readVarInt(usize, endian, size); return parseFormValueBlockLen(allocator, in_stream, block_len); } -fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue { +fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue { // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here. // `nosuspend` should be removed from all the function calls once it is fixed. 
return FormValue{ @@ -302,7 +302,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: boo } // TODO the nosuspends here are workarounds -fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: i32) !FormValue { +fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue { return FormValue{ .Ref = switch (size) { 1 => try nosuspend in_stream.readInt(u8, endian), @@ -316,7 +316,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin. } // TODO the nosuspends here are workarounds -fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue { +fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue { return switch (form_id) { FORM_addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) }, FORM_block1 => parseFormValueBlock(allocator, in_stream, endian, 1), @@ -670,7 +670,7 @@ pub const DwarfInfo = struct { } } - fn parseDie(di: *DwarfInfo, in_stream: var, abbrev_table: *const AbbrevTable, is_64: bool) !?Die { + fn parseDie(di: *DwarfInfo, in_stream: anytype, abbrev_table: *const AbbrevTable, is_64: bool) !?Die { const abbrev_code = try leb.readULEB128(u64, in_stream); if (abbrev_code == 0) return null; const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo; diff --git a/lib/std/elf.zig b/lib/std/elf.zig index b6609d8b31..98508df190 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -517,7 +517,7 @@ pub fn readAllHeaders(allocator: *mem.Allocator, file: File) !AllHeaders { return hdrs; } -pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) { +pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(@TypeOf(int_64), int_64); @@ -529,7 +529,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_ } } -pub fn int32(need_bswap: bool, int_32: var, comptime Int64: var) Int64 { +pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 { if (need_bswap) { return @byteSwap(@TypeOf(int_32), int_32); } else { diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig index 61130b32cb..0dc6550218 100644 --- a/lib/std/event/group.zig +++ b/lib/std/event/group.zig @@ -65,7 +65,7 @@ pub fn Group(comptime ReturnType: type) type { /// allocated by the group and freed by `wait`. /// `func` must be async and have return type `ReturnType`. /// Thread-safe. - pub fn call(self: *Self, comptime func: var, args: var) error{OutOfMemory}!void { + pub fn call(self: *Self, comptime func: anytype, args: anytype) error{OutOfMemory}!void { var frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args))); errdefer self.allocator.destroy(frame); const node = try self.allocator.create(AllocStack.Node); diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 2674ba485a..7415b1b520 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -76,9 +76,9 @@ fn peekIsAlign(comptime fmt: []const u8) bool { /// /// A user type may be a `struct`, `vector`, `union` or `enum` type. 
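To make the "user type" case above concrete, a small sketch modeled on the `custom` test later in this diff; the `Point` type, buffer size, and expected output are illustrative:

const std = @import("std");

const Point = struct {
    x: f32,
    y: f32,

    // A struct opts into formatting by declaring a `format` method;
    // after this commit the writer parameter is `anytype`, not `var`.
    pub fn format(
        self: Point,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
    }
};

test "format a custom type" {
    var buf: [32]u8 = undefined;
    const result = try std.fmt.bufPrint(buf[0..], "{}", .{Point{ .x = 1, .y = 2 }});
    std.testing.expectEqualSlices(u8, "(1.000,2.000)", result);
}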
pub fn format( - writer: var, + writer: anytype, comptime fmt: []const u8, - args: var, + args: anytype, ) !void { const ArgSetType = u32; if (@typeInfo(@TypeOf(args)) != .Struct) { @@ -311,10 +311,10 @@ pub fn format( } pub fn formatType( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, max_depth: usize, ) @TypeOf(writer).Error!void { if (comptime std.mem.eql(u8, fmt, "*")) { @@ -490,10 +490,10 @@ pub fn formatType( } fn formatValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "B")) { return formatBytes(value, options, 1000, writer); @@ -511,10 +511,10 @@ fn formatValue( } pub fn formatIntValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { comptime var radix = 10; comptime var uppercase = false; @@ -551,10 +551,10 @@ pub fn formatIntValue( } fn formatFloatValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) { return formatFloatScientific(value, options, writer); @@ -569,7 +569,7 @@ pub fn formatText( bytes: []const u8, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "s") or (fmt.len == 0)) { return formatBuf(bytes, options, writer); @@ -586,7 +586,7 @@ pub fn formatText( pub fn formatAsciiChar( c: u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { return writer.writeAll(@as(*const [1]u8, &c)); } @@ -594,7 +594,7 @@ pub fn formatAsciiChar( pub fn formatBuf( buf: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const width = options.width orelse buf.len; var padding = if (width > buf.len) (width - buf.len) else 0; @@ -626,9 +626,9 @@ pub fn formatBuf( // It should be the case that every full precision, printed value can be re-parsed back to the // same type unambiguously. pub fn formatFloatScientific( - value: var, + value: anytype, options: FormatOptions, - writer: var, + writer: anytype, ) !void { var x = @floatCast(f64, value); @@ -719,9 +719,9 @@ pub fn formatFloatScientific( // Print a float of the format x.yyyyy where the number of y is specified by the precision argument. // By default floats are printed at full precision (no rounding). 
pub fn formatFloatDecimal( - value: var, + value: anytype, options: FormatOptions, - writer: var, + writer: anytype, ) !void { var x = @as(f64, value); @@ -860,10 +860,10 @@ pub fn formatFloatDecimal( } pub fn formatBytes( - value: var, + value: anytype, options: FormatOptions, comptime radix: usize, - writer: var, + writer: anytype, ) !void { if (value == 0) { return writer.writeAll("0B"); @@ -901,11 +901,11 @@ pub fn formatBytes( } pub fn formatInt( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const int_value = if (@TypeOf(value) == comptime_int) blk: { const Int = math.IntFittingRange(value, value); @@ -921,11 +921,11 @@ pub fn formatInt( } fn formatIntSigned( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const new_options = FormatOptions{ .width = if (options.width) |w| (if (w == 0) 0 else w - 1) else null, @@ -948,11 +948,11 @@ fn formatIntSigned( } fn formatIntUnsigned( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { assert(base >= 2); var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined; @@ -990,7 +990,7 @@ fn formatIntUnsigned( } } -pub fn formatIntBuf(out_buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) usize { +pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) usize { var fbs = std.io.fixedBufferStream(out_buf); formatInt(value, base, uppercase, options, fbs.writer()) catch unreachable; return fbs.pos; @@ -1050,7 +1050,7 @@ fn parseWithSign( .Pos => math.add, .Neg => math.sub, }; - + var x: T = 0; for (buf) |c| { @@ -1132,14 +1132,14 @@ pub const BufPrintError = error{ /// As much as possible was written to the buffer, but it was too small to fit all the printed bytes. NoSpaceLeft, }; -pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: var) BufPrintError![]u8 { +pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![]u8 { var fbs = std.io.fixedBufferStream(buf); try format(fbs.writer(), fmt, args); return fbs.getWritten(); } // Count the characters needed for format. Useful for preallocating memory -pub fn count(comptime fmt: []const u8, args: var) u64 { +pub fn count(comptime fmt: []const u8, args: anytype) u64 { var counting_writer = std.io.countingWriter(std.io.null_writer); format(counting_writer.writer(), fmt, args) catch |err| switch (err) {}; return counting_writer.bytes_written; @@ -1147,7 +1147,7 @@ pub fn count(comptime fmt: []const u8, args: var) u64 { pub const AllocPrintError = error{OutOfMemory}; -pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![]u8 { +pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 { const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) { // Output too long. Can't possibly allocate enough memory to display it. error.Overflow => return error.OutOfMemory, @@ -1158,7 +1158,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var }; } -pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![:0]u8 { +pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 { const result = try allocPrint(allocator, fmt ++ "\x00", args); return result[0 .. 
result.len - 1 :0]; } @@ -1184,7 +1184,7 @@ test "bufPrintInt" { std.testing.expectEqualSlices(u8, "-42", bufPrintIntToSlice(buf, @as(i32, -42), 10, false, FormatOptions{ .width = 3 })); } -fn bufPrintIntToSlice(buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) []u8 { +fn bufPrintIntToSlice(buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) []u8 { return buf[0..formatIntBuf(buf, value, base, uppercase, options)]; } @@ -1452,7 +1452,7 @@ test "custom" { self: SelfType, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); @@ -1573,7 +1573,7 @@ test "bytes.hex" { try testFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros}); } -fn testFmt(expected: []const u8, comptime template: []const u8, args: var) !void { +fn testFmt(expected: []const u8, comptime template: []const u8, args: anytype) !void { var buf: [100]u8 = undefined; const result = try bufPrint(buf[0..], template, args); if (mem.eql(u8, result, expected)) return; @@ -1669,7 +1669,7 @@ test "formatType max_depth" { self: SelfType, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index db4317064d..4a897c78f7 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -29,7 +29,7 @@ pub const PreopenType = union(PreopenTypeTag) { } } - pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: var) !void { + pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void { try out_stream.print("PreopenType{{ ", .{}); switch (self) { PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}), diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index a33b23354b..a3e1a390c2 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -21,7 +21,7 @@ pub const HashStrategy = enum { }; /// Helper function to hash a pointer and mutate the strategy if needed. -pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { const info = @typeInfo(@TypeOf(key)); switch (info.Pointer.size) { @@ -53,7 +53,7 @@ pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void { } /// Helper function to hash a set of contiguous objects, from an array or slice. -pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hashArray(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { switch (strat) { .Shallow => { // TODO detect via a trait when Key has no padding bits to @@ -73,7 +73,7 @@ pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void { /// Provides generic hashing for any eligible type. /// Strategy is provided to determine if pointers should be followed or not. -pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { const Key = @TypeOf(key); switch (@typeInfo(Key)) { .NoReturn, @@ -161,7 +161,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { /// Provides generic hashing for any eligible type. 
/// Only hashes `key` itself, pointers are not followed. /// Slices are rejected to avoid ambiguity on the user's intention. -pub fn autoHash(hasher: var, key: var) void { +pub fn autoHash(hasher: anytype, key: anytype) void { const Key = @TypeOf(key); if (comptime meta.trait.isSlice(Key)) { comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated @@ -181,28 +181,28 @@ pub fn autoHash(hasher: var, key: var) void { const testing = std.testing; const Wyhash = std.hash.Wyhash; -fn testHash(key: var) u64 { +fn testHash(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Shallow); return hasher.final(); } -fn testHashShallow(key: var) u64 { +fn testHashShallow(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Shallow); return hasher.final(); } -fn testHashDeep(key: var) u64 { +fn testHashDeep(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Deep); return hasher.final(); } -fn testHashDeepRecursive(key: var) u64 { +fn testHashDeepRecursive(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .DeepRecursive); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 0eb3a25fe1..5f8a15831c 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -88,7 +88,7 @@ const Result = struct { const block_size: usize = 8 * 8192; -pub fn benchmarkHash(comptime H: var, bytes: usize) !Result { +pub fn benchmarkHash(comptime H: anytype, bytes: usize) !Result { var h = blk: { if (H.init_u8s) |init| { break :blk H.ty.init(init); @@ -119,7 +119,7 @@ pub fn benchmarkHash(comptime H: var, bytes: usize) !Result { }; } -pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result { +pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize) !Result { const key_count = bytes / key_size; var block: [block_size]u8 = undefined; prng.random.bytes(block[0..]); diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig index a717303090..73b94acbd2 100644 --- a/lib/std/hash/cityhash.zig +++ b/lib/std/hash/cityhash.zig @@ -354,7 +354,7 @@ pub const CityHash64 = struct { } }; -fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 { +fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { const hashbytes = hashbits / 8; var key: [256]u8 = undefined; var hashes: [hashbytes * 256]u8 = undefined; diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig index 96efc8b9c1..effa13ad69 100644 --- a/lib/std/hash/murmur.zig +++ b/lib/std/hash/murmur.zig @@ -279,7 +279,7 @@ pub const Murmur3_32 = struct { } }; -fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 { +fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { const hashbytes = hashbits / 8; var key: [256]u8 = undefined; var hashes: [hashbytes * 256]u8 = undefined; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index ea9e95c675..ba96257557 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -15,15 +15,20 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator; const Allocator = mem.Allocator; -usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct { - pub const supports_malloc_size = true; - pub const malloc_size = c.malloc_size; -} else if (comptime 
@hasDecl(c, "malloc_usable_size")) struct { - pub const supports_malloc_size = true; - pub const malloc_size = c.malloc_usable_size; -} else struct { - pub const supports_malloc_size = false; -}; +usingnamespace if (comptime @hasDecl(c, "malloc_size")) + struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_size; + } +else if (comptime @hasDecl(c, "malloc_usable_size")) + struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_usable_size; + } +else + struct { + pub const supports_malloc_size = false; + }; pub const c_allocator = &c_allocator_state; var c_allocator_state = Allocator{ @@ -151,8 +156,7 @@ const PageAllocator = struct { } const maxDropLen = alignment - std.math.min(alignment, mem.page_size); - const allocLen = if (maxDropLen <= alignedLen - n) alignedLen - else mem.alignForward(alignedLen + maxDropLen, mem.page_size); + const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size); const slice = os.mmap( null, allocLen, @@ -331,8 +335,7 @@ const WasmPageAllocator = struct { fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 { const page_count = nPages(len); const page_idx = try allocPages(page_count, alignment); - return @intToPtr([*]u8, page_idx * mem.page_size) - [0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; + return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; } fn allocPages(page_count: usize, alignment: u29) !usize { { @@ -452,7 +455,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); if (new_size == 0) { - os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void ,getRecordPtr(buf).*)); + os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); return 0; } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index b521515a79..d3055c75ee 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -40,7 +40,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { if (new_len == 0) { self.out_stream.print("free : {}\n", .{buf.len}) catch {}; } else if (new_len <= buf.len) { - self.out_stream.print("shrink: {} to {}\n", .{buf.len, new_len}) catch {}; + self.out_stream.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } @@ -60,7 +60,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { pub fn loggingAllocator( parent_allocator: *Allocator, - out_stream: var, + out_stream: anytype, ) LoggingAllocator(@TypeOf(out_stream)) { return LoggingAllocator(@TypeOf(out_stream)).init(parent_allocator, out_stream); } diff --git a/lib/std/http/headers.zig b/lib/std/http/headers.zig index 9310dac348..f5465d4151 100644 --- a/lib/std/http/headers.zig +++ b/lib/std/http/headers.zig @@ -348,7 +348,7 @@ pub const Headers = struct { self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { for (self.toSlice()) |entry| { try out_stream.writeAll(entry.name); diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig index d5e8ce934f..fbdf7fbe78 100644 --- a/lib/std/io/bit_reader.zig +++ 
b/lib/std/io/bit_reader.zig @@ -170,7 +170,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type { pub fn bitReader( comptime endian: builtin.Endian, - underlying_stream: var, + underlying_stream: anytype, ) BitReader(endian, @TypeOf(underlying_stream)) { return BitReader(endian, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig index bdf9156136..7c1d3e5dba 100644 --- a/lib/std/io/bit_writer.zig +++ b/lib/std/io/bit_writer.zig @@ -34,7 +34,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type { /// Write the specified number of bits to the stream from the least significant bits of /// the specified unsigned int value. Bits will only be written to the stream when there /// are enough to fill a byte. - pub fn writeBits(self: *Self, value: var, bits: usize) Error!void { + pub fn writeBits(self: *Self, value: anytype, bits: usize) Error!void { if (bits == 0) return; const U = @TypeOf(value); @@ -145,7 +145,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type { pub fn bitWriter( comptime endian: builtin.Endian, - underlying_stream: var, + underlying_stream: anytype, ) BitWriter(endian, @TypeOf(underlying_stream)) { return BitWriter(endian, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/buffered_reader.zig b/lib/std/io/buffered_reader.zig index f33dc127d2..73d74b465f 100644 --- a/lib/std/io/buffered_reader.zig +++ b/lib/std/io/buffered_reader.zig @@ -48,7 +48,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty }; } -pub fn bufferedReader(underlying_stream: var) BufferedReader(4096, @TypeOf(underlying_stream)) { +pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(underlying_stream)) { return .{ .unbuffered_reader = underlying_stream }; } diff --git a/lib/std/io/buffered_writer.zig b/lib/std/io/buffered_writer.zig index 5cd102b510..a970f899d6 100644 --- a/lib/std/io/buffered_writer.zig +++ b/lib/std/io/buffered_writer.zig @@ -43,6 +43,6 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty }; } -pub fn bufferedWriter(underlying_stream: var) BufferedWriter(4096, @TypeOf(underlying_stream)) { +pub fn bufferedWriter(underlying_stream: anytype) BufferedWriter(4096, @TypeOf(underlying_stream)) { return .{ .unbuffered_writer = underlying_stream }; } diff --git a/lib/std/io/counting_writer.zig b/lib/std/io/counting_writer.zig index 90e4580eea..c0cd53c7ee 100644 --- a/lib/std/io/counting_writer.zig +++ b/lib/std/io/counting_writer.zig @@ -32,7 +32,7 @@ pub fn CountingWriter(comptime WriterType: type) type { }; } -pub fn countingWriter(child_stream: var) CountingWriter(@TypeOf(child_stream)) { +pub fn countingWriter(child_stream: anytype) CountingWriter(@TypeOf(child_stream)) { return .{ .bytes_written = 0, .child_stream = child_stream }; } diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index ee5fe48ca5..32625f3b7a 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -127,7 +127,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { }; } -pub fn fixedBufferStream(buffer: var) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) { +pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) { return .{ .buffer = mem.span(buffer), .pos = 0 }; } diff --git a/lib/std/io/multi_writer.zig b/lib/std/io/multi_writer.zig index 
02ed75eaaa..e63940bff7 100644 --- a/lib/std/io/multi_writer.zig +++ b/lib/std/io/multi_writer.zig @@ -43,7 +43,7 @@ pub fn MultiWriter(comptime Writers: type) type { }; } -pub fn multiWriter(streams: var) MultiWriter(@TypeOf(streams)) { +pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) { return .{ .streams = streams }; } diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig index 2bf6b83bc5..08e940c6ec 100644 --- a/lib/std/io/peek_stream.zig +++ b/lib/std/io/peek_stream.zig @@ -80,7 +80,7 @@ pub fn PeekStream( pub fn peekStream( comptime lookahead: comptime_int, - underlying_stream: var, + underlying_stream: anytype, ) PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)) { return PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig index 8c63b8b966..8fe0782c84 100644 --- a/lib/std/io/serialization.zig +++ b/lib/std/io/serialization.zig @@ -93,7 +93,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, } /// Deserializes data into the type pointed to by `ptr` - pub fn deserializeInto(self: *Self, ptr: var) !void { + pub fn deserializeInto(self: *Self, ptr: anytype) !void { const T = @TypeOf(ptr); comptime assert(trait.is(.Pointer)(T)); @@ -190,7 +190,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, pub fn deserializer( comptime endian: builtin.Endian, comptime packing: Packing, - in_stream: var, + in_stream: anytype, ) Deserializer(endian, packing, @TypeOf(in_stream)) { return Deserializer(endian, packing, @TypeOf(in_stream)).init(in_stream); } @@ -229,7 +229,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co if (packing == .Bit) return self.out_stream.flushBits(); } - fn serializeInt(self: *Self, value: var) Error!void { + fn serializeInt(self: *Self, value: anytype) Error!void { const T = @TypeOf(value); comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T)); @@ -261,7 +261,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co } /// Serializes the passed value into the stream - pub fn serialize(self: *Self, value: var) Error!void { + pub fn serialize(self: *Self, value: anytype) Error!void { const T = comptime @TypeOf(value); if (comptime trait.isIndexable(T)) { @@ -346,7 +346,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co pub fn serializer( comptime endian: builtin.Endian, comptime packing: Packing, - out_stream: var, + out_stream: anytype, ) Serializer(endian, packing, @TypeOf(out_stream)) { return Serializer(endian, packing, @TypeOf(out_stream)).init(out_stream); } @@ -462,7 +462,7 @@ test "Serializer/Deserializer Int: Inf/NaN" { try testIntSerializerDeserializerInfNaN(.Little, .Bit); } -fn testAlternateSerializer(self: var, _serializer: var) !void { +fn testAlternateSerializer(self: anytype, _serializer: anytype) !void { try _serializer.serialize(self.f_f16); } @@ -503,7 +503,7 @@ fn testSerializerDeserializer(comptime endian: builtin.Endian, comptime packing: f_f16: f16, f_unused_u32: u32, - pub fn deserialize(self: *@This(), _deserializer: var) !void { + pub fn deserialize(self: *@This(), _deserializer: anytype) !void { try _deserializer.deserializeInto(&self.f_f16); self.f_unused_u32 = 47; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index 659ba2703e..a98e3b1acd 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -24,7 +24,7 
@@ pub fn Writer( } } - pub fn print(self: Self, comptime format: []const u8, args: var) Error!void { + pub fn print(self: Self, comptime format: []const u8, args: anytype) Error!void { return std.fmt.format(self, format, args); } diff --git a/lib/std/json.zig b/lib/std/json.zig index 65ebe55072..f1b91fc829 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -239,7 +239,7 @@ pub const StreamingParser = struct { NullLiteral3, // Only call this function to generate array/object final state. - pub fn fromInt(x: var) State { + pub fn fromInt(x: anytype) State { debug.assert(x == 0 or x == 1); const T = @TagType(State); return @intToEnum(State, @intCast(T, x)); @@ -1236,7 +1236,7 @@ pub const Value = union(enum) { pub fn jsonStringify( value: @This(), options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { switch (value) { .Null => try stringify(null, options, out_stream), @@ -2338,7 +2338,7 @@ pub const StringifyOptions = struct { pub fn outputIndent( whitespace: @This(), - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { var char: u8 = undefined; var n_chars: usize = undefined; @@ -2380,7 +2380,7 @@ pub const StringifyOptions = struct { fn outputUnicodeEscape( codepoint: u21, - out_stream: var, + out_stream: anytype, ) !void { if (codepoint <= 0xFFFF) { // If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF), @@ -2402,9 +2402,9 @@ fn outputUnicodeEscape( } pub fn stringify( - value: var, + value: anytype, options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { const T = @TypeOf(value); switch (@typeInfo(T)) { @@ -2584,7 +2584,7 @@ pub fn stringify( unreachable; } -fn teststringify(expected: []const u8, value: var, options: StringifyOptions) !void { +fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions) !void { const ValidationOutStream = struct { const Self = @This(); pub const OutStream = std.io.OutStream(*Self, Error, write); @@ -2758,7 +2758,7 @@ test "stringify struct with custom stringifier" { pub fn jsonStringify( value: Self, options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) !void { try out_stream.writeAll("[\"something special\","); try stringify(42, options, out_stream); diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index dcfbf04bc1..778173cc24 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -152,7 +152,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { self: *Self, /// An integer, float, or `std.math.BigInt`. Emitted as a bare number if it fits losslessly /// in a IEEE 754 double float, otherwise emitted as a string to the full precision. 
- value: var, + value: anytype, ) !void { assert(self.state[self.state_index] == State.Value); switch (@typeInfo(@TypeOf(value))) { @@ -215,7 +215,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { self.state_index -= 1; } - fn stringify(self: *Self, value: var) !void { + fn stringify(self: *Self, value: anytype) !void { try std.json.stringify(value, std.json.StringifyOptions{ .whitespace = self.whitespace, }, self.stream); @@ -224,7 +224,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { } pub fn writeStream( - out_stream: var, + out_stream: anytype, comptime max_depth: usize, ) WriteStream(@TypeOf(out_stream), max_depth) { return WriteStream(@TypeOf(out_stream), max_depth).init(out_stream); diff --git a/lib/std/log.zig b/lib/std/log.zig index 63aeaecf88..0006580031 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -101,7 +101,7 @@ fn log( comptime message_level: Level, comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(message_level) <= @enumToInt(level)) { if (@hasDecl(root, "log")) { @@ -120,7 +120,7 @@ fn log( pub fn emerg( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.emerg, scope, format, args); @@ -131,7 +131,7 @@ pub fn emerg( pub fn alert( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.alert, scope, format, args); @@ -143,7 +143,7 @@ pub fn alert( pub fn crit( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.crit, scope, format, args); @@ -154,7 +154,7 @@ pub fn crit( pub fn err( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.err, scope, format, args); @@ -166,7 +166,7 @@ pub fn err( pub fn warn( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.warn, scope, format, args); } @@ -176,7 +176,7 @@ pub fn warn( pub fn notice( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.notice, scope, format, args); } @@ -186,7 +186,7 @@ pub fn notice( pub fn info( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.info, scope, format, args); } @@ -196,7 +196,7 @@ pub fn info( pub fn debug( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.debug, scope, format, args); } diff --git a/lib/std/math.zig b/lib/std/math.zig index 14ffd61c29..111a618cef 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -104,7 +104,7 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool { } // TODO: Hide the following in an internal module. -pub fn forceEval(value: var) void { +pub fn forceEval(value: anytype) void { const T = @TypeOf(value); switch (T) { f16 => { @@ -259,7 +259,7 @@ pub fn Min(comptime A: type, comptime B: type) type { /// Returns the smaller number. When one of the parameter's type's full range fits in the other, /// the return type is the smaller type. 
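// Editor's sketch (not part of the patch): with mixed integer operands the
// result takes the smaller type when its full range fits in the other:
//
//     const r = min(@as(u8, 200), @as(u16, 50)); // r is a u8 with value 50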
-pub fn min(x: var, y: var) Min(@TypeOf(x), @TypeOf(y)) { +pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { const Result = Min(@TypeOf(x), @TypeOf(y)); if (x < y) { // TODO Zig should allow this as an implicit cast because x is immutable and in this @@ -310,7 +310,7 @@ test "math.min" { } } -pub fn max(x: var, y: var) @TypeOf(x, y) { +pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { return if (x > y) x else y; } @@ -318,7 +318,7 @@ test "math.max" { testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); } -pub fn clamp(val: var, lower: var, upper: var) @TypeOf(val, lower, upper) { +pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); return max(lower, min(val, upper)); } @@ -354,7 +354,7 @@ pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) { return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer; } -pub fn negate(x: var) !@TypeOf(x) { +pub fn negate(x: anytype) !@TypeOf(x) { return sub(@TypeOf(x), 0, x); } @@ -365,7 +365,7 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T { /// Shifts left. Overflowed bits are truncated. /// A negative shift amount results in a right shift. -pub fn shl(comptime T: type, a: T, shift_amt: var) T { +pub fn shl(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); @@ -391,7 +391,7 @@ test "math.shl" { /// Shifts right. Overflowed bits are truncated. /// A negative shift amount results in a left shift. -pub fn shr(comptime T: type, a: T, shift_amt: var) T { +pub fn shr(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); @@ -419,7 +419,7 @@ test "math.shr" { /// Rotates right. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. -pub fn rotr(comptime T: type, x: T, r: var) T { +pub fn rotr(comptime T: type, x: T, r: anytype) T { if (T.is_signed) { @compileError("cannot rotate signed integer"); } else { @@ -438,7 +438,7 @@ test "math.rotr" { /// Rotates left. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. -pub fn rotl(comptime T: type, x: T, r: var) T { +pub fn rotl(comptime T: type, x: T, r: anytype) T { if (T.is_signed) { @compileError("cannot rotate signed integer"); } else { @@ -541,7 +541,7 @@ fn testOverflow() void { testing.expect((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000); } -pub fn absInt(x: var) !@TypeOf(x) { +pub fn absInt(x: anytype) !@TypeOf(x) { const T = @TypeOf(x); comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt comptime assert(T.is_signed); // must pass a signed integer to absInt @@ -689,7 +689,7 @@ fn testRem() void { /// Returns the absolute value of the integer parameter. /// Result is an unsigned integer. -pub fn absCast(x: var) switch (@typeInfo(@TypeOf(x))) { +pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) { .ComptimeInt => comptime_int, .Int => |intInfo| std.meta.Int(false, intInfo.bits), else => @compileError("absCast only accepts integers"), @@ -724,7 +724,7 @@ test "math.absCast" { /// Returns the negation of the integer parameter. /// Result is a signed integer. 
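// Editor's sketch (not part of the patch):
//
//     // negateCast(@as(u32, 5)) returns @as(i32, -5); values above
//     // maxInt(i32) (other than -minInt(i32)) yield error.Overflow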
-pub fn negateCast(x: var) !std.meta.Int(true, @TypeOf(x).bit_count) { +pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) { if (@TypeOf(x).is_signed) return negate(x); const int = std.meta.Int(true, @TypeOf(x).bit_count); @@ -747,7 +747,7 @@ test "math.negateCast" { /// Cast an integer to a different integer type. If the value doesn't fit, /// return an error. -pub fn cast(comptime T: type, x: var) (error{Overflow}!T) { +pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) { comptime assert(@typeInfo(T) == .Int); // must pass an integer comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) { @@ -772,7 +772,7 @@ test "math.cast" { pub const AlignCastError = error{UnalignedMemory}; /// Align cast a pointer but return an error if it's the wrong alignment -pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { +pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { const addr = @ptrToInt(ptr); if (addr % alignment != 0) { return error.UnalignedMemory; @@ -780,7 +780,7 @@ pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alig return @alignCast(alignment, ptr); } -pub fn isPowerOfTwo(v: var) bool { +pub fn isPowerOfTwo(v: anytype) bool { assert(v != 0); return (v & (v - 1)) == 0; } @@ -897,7 +897,7 @@ test "std.math.log2_int_ceil" { testing.expect(log2_int_ceil(u32, 10) == 4); } -pub fn lossyCast(comptime T: type, value: var) T { +pub fn lossyCast(comptime T: type, value: anytype) T { switch (@typeInfo(@TypeOf(value))) { .Int => return @intToFloat(T, value), .Float => return @floatCast(T, value), @@ -1031,7 +1031,7 @@ pub const Order = enum { }; /// Given two numbers, this function returns the order they are with respect to each other. -pub fn order(a: var, b: var) Order { +pub fn order(a: anytype, b: anytype) Order { if (a == b) { return .eq; } else if (a < b) { @@ -1062,7 +1062,7 @@ pub const CompareOperator = enum { /// This function does the same thing as comparison operators, however the /// operator is a runtime-known enum value. Works on any operands that /// support comparison operators. 
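// Editor's sketch (not part of the patch): `compare` takes the operator as a
// runtime value, and mixed signedness compares correctly:
//
//     // compare(@as(i8, -1), .lt, @as(u8, 255)) == true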
-pub fn compare(a: var, op: CompareOperator, b: var) bool { +pub fn compare(a: anytype, op: CompareOperator, b: anytype) bool { return switch (op) { .lt => a < b, .lte => a <= b, diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig index aec0d4706a..cdd86601fd 100644 --- a/lib/std/math/acos.zig +++ b/lib/std/math/acos.zig @@ -12,7 +12,7 @@ const expect = std.testing.expect; /// /// Special cases: /// - acos(x) = nan if x < -1 or x > 1 -pub fn acos(x: var) @TypeOf(x) { +pub fn acos(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => acos32(x), diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig index 0f99335058..9a594f9cc4 100644 --- a/lib/std/math/acosh.zig +++ b/lib/std/math/acosh.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// Special cases: /// - acosh(x) = snan if x < 1 /// - acosh(nan) = nan -pub fn acosh(x: var) @TypeOf(x) { +pub fn acosh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => acosh32(x), diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig index db57e2088f..4cff69fc1b 100644 --- a/lib/std/math/asin.zig +++ b/lib/std/math/asin.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - asin(+-0) = +-0 /// - asin(x) = nan if x < -1 or x > 1 -pub fn asin(x: var) @TypeOf(x) { +pub fn asin(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => asin32(x), diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig index ab1b650139..940b953d06 100644 --- a/lib/std/math/asinh.zig +++ b/lib/std/math/asinh.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - asinh(+-0) = +-0 /// - asinh(+-inf) = +-inf /// - asinh(nan) = nan -pub fn asinh(x: var) @TypeOf(x) { +pub fn asinh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => asinh32(x), diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig index eb9154b5fe..9342b6ed59 100644 --- a/lib/std/math/atan.zig +++ b/lib/std/math/atan.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - atan(+-0) = +-0 /// - atan(+-inf) = +-pi/2 -pub fn atan(x: var) @TypeOf(x) { +pub fn atan(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => atan32(x), diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig index e58a10fff5..de742bd4cd 100644 --- a/lib/std/math/atanh.zig +++ b/lib/std/math/atanh.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - atanh(+-1) = +-inf with signal /// - atanh(x) = nan if |x| > 1 with signal /// - atanh(nan) = nan -pub fn atanh(x: var) @TypeOf(x) { +pub fn atanh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => atanh_32(x), diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 85e14bc55c..b6d7731f1a 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -12,7 +12,7 @@ const assert = std.debug.assert; /// Returns the number of limbs needed to store `scalar`, which must be a /// primitive integer value. -pub fn calcLimbLen(scalar: var) usize { +pub fn calcLimbLen(scalar: anytype) usize { const T = @TypeOf(scalar); switch (@typeInfo(T)) { .Int => |info| { @@ -110,7 +110,7 @@ pub const Mutable = struct { /// `value` is a primitive integer type. /// Asserts the value fits within the provided `limbs_buffer`. /// Note: `calcLimbLen` can be used to figure out how big an array to allocate for `limbs_buffer`. 
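// Editor's sketch (not part of the patch): pairing `calcLimbLen` with
// `Mutable.init` for a stack-allocated big integer:
//
//     var limbs: [calcLimbLen(@as(u64, 12345))]Limb = undefined;
//     const m = Mutable.init(&limbs, @as(u64, 12345));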
- pub fn init(limbs_buffer: []Limb, value: var) Mutable { + pub fn init(limbs_buffer: []Limb, value: anytype) Mutable { limbs_buffer[0] = 0; var self: Mutable = .{ .limbs = limbs_buffer, @@ -169,7 +169,7 @@ pub const Mutable = struct { /// Asserts the value fits within the limbs buffer. /// Note: `calcLimbLen` can be used to figure out how big the limbs buffer /// needs to be to store a specific value. - pub fn set(self: *Mutable, value: var) void { + pub fn set(self: *Mutable, value: anytype) void { const T = @TypeOf(value); switch (@typeInfo(T)) { @@ -281,7 +281,7 @@ pub const Mutable = struct { /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. - pub fn addScalar(r: *Mutable, a: Const, scalar: var) void { + pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { var limbs: [calcLimbLen(scalar)]Limb = undefined; const operand = init(&limbs, scalar).toConst(); return add(r, a, operand); @@ -1058,7 +1058,7 @@ pub const Const = struct { self: Const, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { comptime var radix = 10; comptime var uppercase = false; @@ -1261,7 +1261,7 @@ pub const Const = struct { } /// Same as `order` but the right-hand operand is a primitive integer. - pub fn orderAgainstScalar(lhs: Const, scalar: var) math.Order { + pub fn orderAgainstScalar(lhs: Const, scalar: anytype) math.Order { var limbs: [calcLimbLen(scalar)]Limb = undefined; const rhs = Mutable.init(&limbs, scalar); return order(lhs, rhs.toConst()); @@ -1333,7 +1333,7 @@ pub const Managed = struct { /// Creates a new `Managed` with value `value`. /// /// This is identical to an `init`, followed by a `set`. - pub fn initSet(allocator: *Allocator, value: var) !Managed { + pub fn initSet(allocator: *Allocator, value: anytype) !Managed { var s = try Managed.init(allocator); try s.set(value); return s; @@ -1496,7 +1496,7 @@ pub const Managed = struct { } /// Sets an Managed to value. Value must be an primitive integer type. - pub fn set(self: *Managed, value: var) Allocator.Error!void { + pub fn set(self: *Managed, value: anytype) Allocator.Error!void { try self.ensureCapacity(calcLimbLen(value)); var m = self.toMutable(); m.set(value); @@ -1549,7 +1549,7 @@ pub const Managed = struct { self: Managed, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { return self.toConst().format(fmt, options, out_stream); } @@ -1607,7 +1607,7 @@ pub const Managed = struct { /// scalar is a primitive integer type. /// /// Returns an error if memory could not be allocated. - pub fn addScalar(r: *Managed, a: Const, scalar: var) Allocator.Error!void { + pub fn addScalar(r: *Managed, a: Const, scalar: anytype) Allocator.Error!void { try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); var m = r.toMutable(); m.addScalar(a, scalar); diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 3624a16139..6f62a462b8 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -43,7 +43,7 @@ pub const Rational = struct { } /// Set a Rational from a primitive integer type. - pub fn setInt(self: *Rational, a: var) !void { + pub fn setInt(self: *Rational, a: anytype) !void { try self.p.set(a); try self.q.set(1); } @@ -280,7 +280,7 @@ pub const Rational = struct { } /// Set a rational from an integer ratio. 
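// Editor's sketch (not part of the patch; assumes `Rational` keeps its usual
// reduced representation):
//
//     var r = try Rational.init(allocator);
//     defer r.deinit();
//     try r.setRatio(2, 4); // equivalent to 1/2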
- pub fn setRatio(self: *Rational, p: var, q: var) !void { + pub fn setRatio(self: *Rational, p: anytype, q: anytype) !void { try self.p.set(p); try self.q.set(q); diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig index 2b219d5368..42163b96dc 100644 --- a/lib/std/math/cbrt.zig +++ b/lib/std/math/cbrt.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - cbrt(+-0) = +-0 /// - cbrt(+-inf) = +-inf /// - cbrt(nan) = nan -pub fn cbrt(x: var) @TypeOf(x) { +pub fn cbrt(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cbrt32(x), diff --git a/lib/std/math/ceil.zig b/lib/std/math/ceil.zig index e3b5679318..39de46f361 100644 --- a/lib/std/math/ceil.zig +++ b/lib/std/math/ceil.zig @@ -15,7 +15,7 @@ const expect = std.testing.expect; /// - ceil(+-0) = +-0 /// - ceil(+-inf) = +-inf /// - ceil(nan) = nan -pub fn ceil(x: var) @TypeOf(x) { +pub fn ceil(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => ceil32(x), diff --git a/lib/std/math/complex/abs.zig b/lib/std/math/complex/abs.zig index 75b967f3d2..db31aef42a 100644 --- a/lib/std/math/complex/abs.zig +++ b/lib/std/math/complex/abs.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the absolute value (modulus) of z. -pub fn abs(z: var) @TypeOf(z.re) { +pub fn abs(z: anytype) @TypeOf(z.re) { const T = @TypeOf(z.re); return math.hypot(T, z.re, z.im); } diff --git a/lib/std/math/complex/acos.zig b/lib/std/math/complex/acos.zig index 24a645375c..072fd77f08 100644 --- a/lib/std/math/complex/acos.zig +++ b/lib/std/math/complex/acos.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the arc-cosine of z. -pub fn acos(z: var) Complex(@TypeOf(z.re)) { +pub fn acos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.asin(z); return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im); diff --git a/lib/std/math/complex/acosh.zig b/lib/std/math/complex/acosh.zig index 996334034a..59117a8b27 100644 --- a/lib/std/math/complex/acosh.zig +++ b/lib/std/math/complex/acosh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-cosine of z. -pub fn acosh(z: var) Complex(@TypeOf(z.re)) { +pub fn acosh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.acos(z); return Complex(T).new(-q.im, q.re); diff --git a/lib/std/math/complex/arg.zig b/lib/std/math/complex/arg.zig index f690e92143..6cf959a081 100644 --- a/lib/std/math/complex/arg.zig +++ b/lib/std/math/complex/arg.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the angular component (in radians) of z. -pub fn arg(z: var) @TypeOf(z.re) { +pub fn arg(z: anytype) @TypeOf(z.re) { const T = @TypeOf(z.re); return math.atan2(T, z.im, z.re); } diff --git a/lib/std/math/complex/asin.zig b/lib/std/math/complex/asin.zig index 01fa33156a..9f7cd396aa 100644 --- a/lib/std/math/complex/asin.zig +++ b/lib/std/math/complex/asin.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; // Returns the arc-sine of z. 
-pub fn asin(z: var) Complex(@TypeOf(z.re)) { +pub fn asin(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const x = z.re; const y = z.im;
diff --git a/lib/std/math/complex/asinh.zig b/lib/std/math/complex/asinh.zig index 47d8244adb..0c3c2bd115 100644 --- a/lib/std/math/complex/asinh.zig +++ b/lib/std/math/complex/asinh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-sine of z. -pub fn asinh(z: var) Complex(@TypeOf(z.re)) { +pub fn asinh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.asin(q);
diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig index 5ba6f7b0d2..98bde3e125 100644 --- a/lib/std/math/complex/atan.zig +++ b/lib/std/math/complex/atan.zig @@ -12,7 +12,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the arc-tangent of z. -pub fn atan(z: var) @TypeOf(z) { +pub fn atan(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => atan32(z),
diff --git a/lib/std/math/complex/atanh.zig b/lib/std/math/complex/atanh.zig index 8b70306224..a07c2969e4 100644 --- a/lib/std/math/complex/atanh.zig +++ b/lib/std/math/complex/atanh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-tangent of z. -pub fn atanh(z: var) Complex(@TypeOf(z.re)) { +pub fn atanh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.atan(q);
diff --git a/lib/std/math/complex/conj.zig b/lib/std/math/complex/conj.zig index 1065d4bb73..42a34e7dfc 100644 --- a/lib/std/math/complex/conj.zig +++ b/lib/std/math/complex/conj.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the complex conjugate of z. -pub fn conj(z: var) Complex(@TypeOf(z.re)) { +pub fn conj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); return Complex(T).new(z.re, -z.im); }
diff --git a/lib/std/math/complex/cos.zig b/lib/std/math/complex/cos.zig index 1aefa73db5..9daf89c730 100644 --- a/lib/std/math/complex/cos.zig +++ b/lib/std/math/complex/cos.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the cosine of z. -pub fn cos(z: var) Complex(@TypeOf(z.re)) { +pub fn cos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const p = Complex(T).new(-z.im, z.re); return cmath.cosh(p);
diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig index a9ac893602..bd51629bd4 100644 --- a/lib/std/math/complex/cosh.zig +++ b/lib/std/math/complex/cosh.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns the hyperbolic cosine of z. -pub fn cosh(z: var) Complex(@TypeOf(z.re)) { +pub fn cosh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); return switch (T) { f32 => cosh32(z),
diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig index 9f9e3db807..6f6061a947 100644 --- a/lib/std/math/complex/exp.zig +++ b/lib/std/math/complex/exp.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns e raised to the power of z (e^z).
-pub fn exp(z: var) @TypeOf(z) { +pub fn exp(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) {
diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig index 9eccd4bb98..c23b9b346e 100644 --- a/lib/std/math/complex/ldexp.zig +++ b/lib/std/math/complex/ldexp.zig @@ -11,7 +11,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns exp(z) scaled to avoid overflow. -pub fn ldexp_cexp(z: var, expt: i32) @TypeOf(z) { +pub fn ldexp_cexp(z: anytype, expt: i32) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) {
diff --git a/lib/std/math/complex/log.zig b/lib/std/math/complex/log.zig index f1fad3175e..ec02c6c325 100644 --- a/lib/std/math/complex/log.zig +++ b/lib/std/math/complex/log.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the natural logarithm of z. -pub fn log(z: var) Complex(@TypeOf(z.re)) { +pub fn log(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const r = cmath.abs(z); const phi = cmath.arg(z);
diff --git a/lib/std/math/complex/proj.zig b/lib/std/math/complex/proj.zig index 349f6b3abb..e208ae0370 100644 --- a/lib/std/math/complex/proj.zig +++ b/lib/std/math/complex/proj.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the projection of z onto the Riemann sphere. -pub fn proj(z: var) Complex(@TypeOf(z.re)) { +pub fn proj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); if (math.isInf(z.re) or math.isInf(z.im)) {
diff --git a/lib/std/math/complex/sin.zig b/lib/std/math/complex/sin.zig index 87dc57911b..1b10f8fca6 100644 --- a/lib/std/math/complex/sin.zig +++ b/lib/std/math/complex/sin.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the sine of z. -pub fn sin(z: var) Complex(@TypeOf(z.re)) { +pub fn sin(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const p = Complex(T).new(-z.im, z.re); const q = cmath.sinh(p);
diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig index 7dd880c71c..32f2a730fb 100644 --- a/lib/std/math/complex/sinh.zig +++ b/lib/std/math/complex/sinh.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns the hyperbolic sine of z. -pub fn sinh(z: var) @TypeOf(z) { +pub fn sinh(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => sinh32(z),
diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig index 57e73f6cd1..0edb02a7a9 100644 --- a/lib/std/math/complex/sqrt.zig +++ b/lib/std/math/complex/sqrt.zig @@ -12,7 +12,7 @@ const Complex = cmath.Complex; /// Returns the square root of z. The real and imaginary parts of the result have the same sign /// as the imaginary part of z. -pub fn sqrt(z: var) @TypeOf(z) { +pub fn sqrt(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) {
diff --git a/lib/std/math/complex/tan.zig b/lib/std/math/complex/tan.zig index 70304803db..050898c573 100644 --- a/lib/std/math/complex/tan.zig +++ b/lib/std/math/complex/tan.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the tangent of z.
-pub fn tan(z: var) Complex(@TypeOf(z.re)) { +pub fn tan(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.tanh(q); diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig index afd2e6aee4..1d614cca58 100644 --- a/lib/std/math/complex/tanh.zig +++ b/lib/std/math/complex/tanh.zig @@ -12,7 +12,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic tangent of z. -pub fn tanh(z: var) @TypeOf(z) { +pub fn tanh(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => tanh32(z), diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig index aa336769b1..df5c0a53be 100644 --- a/lib/std/math/cos.zig +++ b/lib/std/math/cos.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - cos(+-inf) = nan /// - cos(nan) = nan -pub fn cos(x: var) @TypeOf(x) { +pub fn cos(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cos_(f32, x), diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig index 1cd8c5f27f..bab47dcdbd 100644 --- a/lib/std/math/cosh.zig +++ b/lib/std/math/cosh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - cosh(+-0) = 1 /// - cosh(+-inf) = +inf /// - cosh(nan) = nan -pub fn cosh(x: var) @TypeOf(x) { +pub fn cosh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cosh32(x), diff --git a/lib/std/math/exp.zig b/lib/std/math/exp.zig index da80b201c0..c84d929adf 100644 --- a/lib/std/math/exp.zig +++ b/lib/std/math/exp.zig @@ -14,7 +14,7 @@ const builtin = @import("builtin"); /// Special Cases: /// - exp(+inf) = +inf /// - exp(nan) = nan -pub fn exp(x: var) @TypeOf(x) { +pub fn exp(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => exp32(x), diff --git a/lib/std/math/exp2.zig b/lib/std/math/exp2.zig index 411f789187..da391189b2 100644 --- a/lib/std/math/exp2.zig +++ b/lib/std/math/exp2.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - exp2(+inf) = +inf /// - exp2(nan) = nan -pub fn exp2(x: var) @TypeOf(x) { +pub fn exp2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => exp2_32(x), diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig index 91752e9f80..80cdefae20 100644 --- a/lib/std/math/expm1.zig +++ b/lib/std/math/expm1.zig @@ -18,7 +18,7 @@ const expect = std.testing.expect; /// - expm1(+inf) = +inf /// - expm1(-inf) = -1 /// - expm1(nan) = nan -pub fn expm1(x: var) @TypeOf(x) { +pub fn expm1(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => expm1_32(x), diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig index e70e365f26..f404570fb6 100644 --- a/lib/std/math/expo2.zig +++ b/lib/std/math/expo2.zig @@ -7,7 +7,7 @@ const math = @import("../math.zig"); /// Returns exp(x) / 2 for x >= log(maxFloat(T)). 
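// Editor's note (sketch, not part of the patch): dividing by two inside the
// scaled computation keeps results representable just past log(maxFloat); e.g.
// for f64, exp(710.0) overflows to inf while exp(710)/2 (roughly 1.1e308) is
// still finite, which is what cosh/sinh rely on for large |x|.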
-pub fn expo2(x: var) @TypeOf(x) { +pub fn expo2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => expo2f(x), diff --git a/lib/std/math/fabs.zig b/lib/std/math/fabs.zig index a659e35ca2..ca91f594fd 100644 --- a/lib/std/math/fabs.zig +++ b/lib/std/math/fabs.zig @@ -14,7 +14,7 @@ const maxInt = std.math.maxInt; /// Special Cases: /// - fabs(+-inf) = +inf /// - fabs(nan) = nan -pub fn fabs(x: var) @TypeOf(x) { +pub fn fabs(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f16 => fabs16(x), diff --git a/lib/std/math/floor.zig b/lib/std/math/floor.zig index 565e2911a9..3a71cc7cdf 100644 --- a/lib/std/math/floor.zig +++ b/lib/std/math/floor.zig @@ -15,7 +15,7 @@ const math = std.math; /// - floor(+-0) = +-0 /// - floor(+-inf) = +-inf /// - floor(nan) = nan -pub fn floor(x: var) @TypeOf(x) { +pub fn floor(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f16 => floor16(x), diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig index cfdf9f838d..0e4558dc37 100644 --- a/lib/std/math/frexp.zig +++ b/lib/std/math/frexp.zig @@ -24,7 +24,7 @@ pub const frexp64_result = frexp_result(f64); /// - frexp(+-0) = +-0, 0 /// - frexp(+-inf) = +-inf, 0 /// - frexp(nan) = nan, undefined -pub fn frexp(x: var) frexp_result(@TypeOf(x)) { +pub fn frexp(x: anytype) frexp_result(@TypeOf(x)) { const T = @TypeOf(x); return switch (T) { f32 => frexp32(x), diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig index 22e3fbaa97..748cf9ea0d 100644 --- a/lib/std/math/ilogb.zig +++ b/lib/std/math/ilogb.zig @@ -16,7 +16,7 @@ const minInt = std.math.minInt; /// - ilogb(+-inf) = maxInt(i32) /// - ilogb(0) = maxInt(i32) /// - ilogb(nan) = maxInt(i32) -pub fn ilogb(x: var) i32 { +pub fn ilogb(x: anytype) i32 { const T = @TypeOf(x); return switch (T) { f32 => ilogb32(x), diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig index 26b3ce54a1..0681eae0b7 100644 --- a/lib/std/math/isfinite.zig +++ b/lib/std/math/isfinite.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is a finite value. -pub fn isFinite(x: var) bool { +pub fn isFinite(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig index 6eacab52ad..19357d89d1 100644 --- a/lib/std/math/isinf.zig +++ b/lib/std/math/isinf.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is an infinity, ignoring sign. -pub fn isInf(x: var) bool { +pub fn isInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { @@ -30,7 +30,7 @@ pub fn isInf(x: var) bool { } /// Returns whether x is an infinity with a positive sign. -pub fn isPositiveInf(x: var) bool { +pub fn isPositiveInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { @@ -52,7 +52,7 @@ pub fn isPositiveInf(x: var) bool { } /// Returns whether x is an infinity with a negative sign. -pub fn isNegativeInf(x: var) bool { +pub fn isNegativeInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/isnan.zig b/lib/std/math/isnan.zig index ac865f0d0c..797c115d1d 100644 --- a/lib/std/math/isnan.zig +++ b/lib/std/math/isnan.zig @@ -4,12 +4,12 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is a nan. -pub fn isNan(x: var) bool { +pub fn isNan(x: anytype) bool { return x != x; } /// Returns whether x is a signalling nan. 
-pub fn isSignalNan(x: var) bool { +pub fn isSignalNan(x: anytype) bool { // Note: A signalling nan is identical to a standard nan right now but may have a different bit // representation in the future when required. return isNan(x); diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig index 917b4ebfcf..a3144f2784 100644 --- a/lib/std/math/isnormal.zig +++ b/lib/std/math/isnormal.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; // Returns whether x has a normalized representation (i.e. integer part of mantissa is 1). -pub fn isNormal(x: var) bool { +pub fn isNormal(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/ln.zig b/lib/std/math/ln.zig index 555a786907..99e54c4cc7 100644 --- a/lib/std/math/ln.zig +++ b/lib/std/math/ln.zig @@ -15,7 +15,7 @@ const expect = std.testing.expect; /// - ln(0) = -inf /// - ln(x) = nan if x < 0 /// - ln(nan) = nan -pub fn ln(x: var) @TypeOf(x) { +pub fn ln(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig index 7367af28c6..e55bd8c1e8 100644 --- a/lib/std/math/log10.zig +++ b/lib/std/math/log10.zig @@ -16,7 +16,7 @@ const maxInt = std.math.maxInt; /// - log10(0) = -inf /// - log10(x) = nan if x < 0 /// - log10(nan) = nan -pub fn log10(x: var) @TypeOf(x) { +pub fn log10(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig index 5e92cfdea3..e24ba8d84d 100644 --- a/lib/std/math/log1p.zig +++ b/lib/std/math/log1p.zig @@ -17,7 +17,7 @@ const expect = std.testing.expect; /// - log1p(-1) = -inf /// - log1p(x) = nan if x < -1 /// - log1p(nan) = nan -pub fn log1p(x: var) @TypeOf(x) { +pub fn log1p(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => log1p_32(x), diff --git a/lib/std/math/log2.zig b/lib/std/math/log2.zig index 54f8bc2baa..95d06a2b60 100644 --- a/lib/std/math/log2.zig +++ b/lib/std/math/log2.zig @@ -16,7 +16,7 @@ const maxInt = std.math.maxInt; /// - log2(0) = -inf /// - log2(x) = nan if x < 0 /// - log2(nan) = nan -pub fn log2(x: var) @TypeOf(x) { +pub fn log2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig index 6fd89e3dda..5ab5318a79 100644 --- a/lib/std/math/modf.zig +++ b/lib/std/math/modf.zig @@ -24,7 +24,7 @@ pub const modf64_result = modf_result(f64); /// Special Cases: /// - modf(+-inf) = +-inf, nan /// - modf(nan) = nan, nan -pub fn modf(x: var) modf_result(@TypeOf(x)) { +pub fn modf(x: anytype) modf_result(@TypeOf(x)) { const T = @TypeOf(x); return switch (T) { f32 => modf32(x), diff --git a/lib/std/math/round.zig b/lib/std/math/round.zig index 052c0f7670..854adee4ba 100644 --- a/lib/std/math/round.zig +++ b/lib/std/math/round.zig @@ -15,7 +15,7 @@ const math = std.math; /// - round(+-0) = +-0 /// - round(+-inf) = +-inf /// - round(nan) = nan -pub fn round(x: var) @TypeOf(x) { +pub fn round(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => round32(x), diff --git a/lib/std/math/scalbn.zig b/lib/std/math/scalbn.zig index bab109f334..71a8110ce7 100644 --- a/lib/std/math/scalbn.zig +++ b/lib/std/math/scalbn.zig @@ -9,7 +9,7 @@ const math = std.math; const expect = std.testing.expect; /// Returns x * 2^n. 
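// Editor's sketch (not part of the patch):
//
//     // scalbn(@as(f32, 1.5), 3) == 12.0, i.e. 1.5 * 2^3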
-pub fn scalbn(x: var, n: i32) @TypeOf(x) { +pub fn scalbn(x: anytype, n: i32) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => scalbn32(x, n), diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig index 9cb62b5042..49397f7bd4 100644 --- a/lib/std/math/signbit.zig +++ b/lib/std/math/signbit.zig @@ -3,7 +3,7 @@ const math = std.math; const expect = std.testing.expect; /// Returns whether x is negative or negative 0. -pub fn signbit(x: var) bool { +pub fn signbit(x: anytype) bool { const T = @TypeOf(x); return switch (T) { f16 => signbit16(x), diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig index e88f5eeeaf..df3b294ca6 100644 --- a/lib/std/math/sin.zig +++ b/lib/std/math/sin.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - sin(+-0) = +-0 /// - sin(+-inf) = nan /// - sin(nan) = nan -pub fn sin(x: var) @TypeOf(x) { +pub fn sin(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => sin_(T, x), diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig index 0e2cb5a3d5..26e0e05f38 100644 --- a/lib/std/math/sinh.zig +++ b/lib/std/math/sinh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - sinh(+-0) = +-0 /// - sinh(+-inf) = +-inf /// - sinh(nan) = nan -pub fn sinh(x: var) @TypeOf(x) { +pub fn sinh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => sinh32(x), diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 097b0152f7..2f0d251432 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -13,7 +13,7 @@ const maxInt = std.math.maxInt; /// - sqrt(x) = nan if x < 0 /// - sqrt(nan) = nan /// TODO Decide if all this logic should be implemented directly in the @sqrt builtin function. -pub fn sqrt(x: var) Sqrt(@TypeOf(x)) { +pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) { const T = @TypeOf(x); switch (@typeInfo(T)) { .Float, .ComptimeFloat => return @sqrt(x), diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig index 86f473f448..2cd5a407df 100644 --- a/lib/std/math/tan.zig +++ b/lib/std/math/tan.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - tan(+-0) = +-0 /// - tan(+-inf) = nan /// - tan(nan) = nan -pub fn tan(x: var) @TypeOf(x) { +pub fn tan(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => tan_(f32, x), diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig index 1cad399729..7697db5271 100644 --- a/lib/std/math/tanh.zig +++ b/lib/std/math/tanh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - tanh(+-0) = +-0 /// - tanh(+-inf) = +-1 /// - tanh(nan) = nan -pub fn tanh(x: var) @TypeOf(x) { +pub fn tanh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => tanh32(x), diff --git a/lib/std/math/trunc.zig b/lib/std/math/trunc.zig index cdd2fa3c6b..df24b77111 100644 --- a/lib/std/math/trunc.zig +++ b/lib/std/math/trunc.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - trunc(+-0) = +-0 /// - trunc(+-inf) = +-inf /// - trunc(nan) = nan -pub fn trunc(x: var) @TypeOf(x) { +pub fn trunc(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => trunc32(x), diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 08ecc5167f..82831787a7 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -122,7 +122,7 @@ pub const Allocator = struct { assert(resized_len >= new_byte_count); @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); return old_mem.ptr[0..resized_len]; - } else |_| { } + } else |_| {} } if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) { return
error.OutOfMemory; @@ -156,7 +156,7 @@ pub const Allocator = struct { /// `ptr` should be the return value of `create`, or otherwise /// have the same address and alignment property. - pub fn destroy(self: *Allocator, ptr: var) void { + pub fn destroy(self: *Allocator, ptr: anytype) void { const T = @TypeOf(ptr).Child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); @@ -225,7 +225,7 @@ pub const Allocator = struct { return self.allocAdvanced(T, alignment, n, .exact); } - const Exact = enum {exact,at_least}; + const Exact = enum { exact, at_least }; pub fn allocAdvanced( self: *Allocator, comptime T: type, @@ -272,7 +272,7 @@ pub const Allocator = struct { /// in `std.ArrayList.shrink`. /// If you need guaranteed success, call `shrink`. /// If `new_n` is 0, this is the same as `free` and it always succeeds. - pub fn realloc(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -280,7 +280,7 @@ pub const Allocator = struct { return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact); } - pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -291,7 +291,7 @@ pub const Allocator = struct { // Deprecated: use `reallocAdvanced` pub fn alignedRealloc( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { @@ -303,7 +303,7 @@ pub const Allocator = struct { /// allocation. pub fn reallocAdvanced( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, exact: Exact, @@ -321,8 +321,7 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure - const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, - if (exact == .exact) @as(u29, 0) else @sizeOf(T)); + const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T)); return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice)); } @@ -331,7 +330,7 @@ pub const Allocator = struct { /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`. /// Returned slice has same alignment as old_mem. /// Shrinking to 0 is the same as calling `free`. - pub fn shrink(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t []align(Slice.alignment) Slice.child; } { @@ -344,7 +343,7 @@ pub const Allocator = struct { /// allocation. pub fn alignedShrink( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, ) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { @@ -368,7 +367,7 @@ pub const Allocator = struct { /// Free an array allocated with `alloc`. To free a single item, /// see `destroy`. 
- pub fn free(self: *Allocator, memory: var) void { + pub fn free(self: *Allocator, memory: anytype) void { const Slice = @typeInfo(@TypeOf(memory)).Pointer; const bytes = mem.sliceAsBytes(memory); const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0; @@ -396,67 +395,69 @@ pub const Allocator = struct { /// Detects and asserts if the std.mem.Allocator interface is violated by the caller /// or the allocator. -pub fn ValidationAllocator(comptime T: type) type { return struct { - const Self = @This(); - allocator: Allocator, - underlying_allocator: T, - pub fn init(allocator: T) @This() { - return .{ - .allocator = .{ - .allocFn = alloc, - .resizeFn = resize, - }, - .underlying_allocator = allocator, - }; - } - fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { - if (T == *Allocator) return self.underlying_allocator; - if (*T == *Allocator) return &self.underlying_allocator; - return &self.underlying_allocator.allocator; - } - pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { - assert(n > 0); - assert(mem.isValidAlign(ptr_align)); - if (len_align != 0) { - assert(mem.isAlignedAnyAlign(n, len_align)); - assert(n >= len_align); - } - - const self = @fieldParentPtr(@This(), "allocator", allocator); - const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); - assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); - if (len_align == 0) { - assert(result.len == n); - } else { - assert(result.len >= n); - assert(mem.isAlignedAnyAlign(result.len, len_align)); +pub fn ValidationAllocator(comptime T: type) type { + return struct { + const Self = @This(); + allocator: Allocator, + underlying_allocator: T, + pub fn init(allocator: T) @This() { + return .{ + .allocator = .{ + .allocFn = alloc, + .resizeFn = resize, + }, + .underlying_allocator = allocator, + }; } - return result; - } - pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize { - assert(buf.len > 0); - if (len_align != 0) { - assert(mem.isAlignedAnyAlign(new_len, len_align)); - assert(new_len >= len_align); + fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { + if (T == *Allocator) return self.underlying_allocator; + if (*T == *Allocator) return &self.underlying_allocator; + return &self.underlying_allocator.allocator; } - const self = @fieldParentPtr(@This(), "allocator", allocator); - const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align); - if (len_align == 0) { - assert(result == new_len); - } else { - assert(result >= new_len); - assert(mem.isAlignedAnyAlign(result, len_align)); + pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { + assert(n > 0); + assert(mem.isValidAlign(ptr_align)); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(n, len_align)); + assert(n >= len_align); + } + + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); + assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); + if (len_align == 0) { + assert(result.len == n); + } else { + assert(result.len >= n); + assert(mem.isAlignedAnyAlign(result.len, len_align)); + } + return result; } - return result; - } - pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { - pub fn reset(self: *Self) void { - self.underlying_allocator.reset(); + pub fn resize(allocator: 
*Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize { + assert(buf.len > 0); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(new_len, len_align)); + assert(new_len >= len_align); + } + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align); + if (len_align == 0) { + assert(result == new_len); + } else { + assert(result >= new_len); + assert(mem.isAlignedAnyAlign(result, len_align)); + } + return result; } pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { pub fn reset(self: *Self) void { self.underlying_allocator.reset(); + } + }; }; -};} +} -pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) { +pub fn validationWrap(allocator: anytype) ValidationAllocator(@TypeOf(allocator)) { return ValidationAllocator(@TypeOf(allocator)).init(allocator); } @@ -465,14 +466,14 @@ pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) { /// than the `len` that was requested. This function should only be used by allocators /// that are unaffected by `len_align`. pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { - assert(alloc_len > 0); - assert(alloc_len >= len_align); - assert(full_len >= alloc_len); - if (len_align == 0) - return alloc_len; - const adjusted = alignBackwardAnyAlign(full_len, len_align); - assert(adjusted >= alloc_len); - return adjusted; + assert(alloc_len > 0); + assert(alloc_len >= len_align); + assert(full_len >= alloc_len); + if (len_align == 0) + return alloc_len; + const adjusted = alignBackwardAnyAlign(full_len, len_align); + assert(adjusted >= alloc_len); + return adjusted; } var failAllocator = Allocator{ @@ -695,7 +696,7 @@ test "mem.secureZero" { /// Initializes all fields of the struct with their default value, or zero values if no default value is present. /// If the field is present in the provided initial values, it will have that value instead. /// Structs are initialized recursively. -pub fn zeroInit(comptime T: type, init: var) T { +pub fn zeroInit(comptime T: type, init: anytype) T { comptime const Init = @TypeOf(init); switch (@typeInfo(T)) { @@ -895,7 +896,7 @@ test "Span" { /// /// When there is both a sentinel and an array length or slice length, the /// length value is used instead of the sentinel. -pub fn span(ptr: var) Span(@TypeOf(ptr)) { +pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return span(non_null); @@ -923,7 +924,7 @@ test "span" { /// Same as `span`, except when there is both a sentinel and an array /// length or slice length, scans the memory for the sentinel value /// rather than using the length. -pub fn spanZ(ptr: var) Span(@TypeOf(ptr)) { +pub fn spanZ(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return spanZ(non_null); @@ -952,7 +953,7 @@ test "spanZ" { /// or a slice, and returns the length. /// In the case of a sentinel-terminated array, it uses the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel.
-pub fn len(value: var) usize { +pub fn len(value: anytype) usize { return switch (@typeInfo(@TypeOf(value))) { .Array => |info| info.len, .Vector => |info| info.len, @@ -1000,7 +1001,7 @@ test "len" { /// In the case of a sentinel-terminated array, it scans the array /// for a sentinel and uses that for the length, rather than using the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel. -pub fn lenZ(ptr: var) usize { +pub fn lenZ(ptr: anytype) usize { return switch (@typeInfo(@TypeOf(ptr))) { .Array => |info| if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, &ptr) @@ -2031,7 +2032,7 @@ fn AsBytesReturnType(comptime P: type) type { } /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving constness. -pub fn asBytes(ptr: var) AsBytesReturnType(@TypeOf(ptr)) { +pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) { const P = @TypeOf(ptr); return @ptrCast(AsBytesReturnType(P), ptr); } @@ -2071,7 +2072,7 @@ test "asBytes" { } ///Given any value, returns a copy of its bytes in an array. -pub fn toBytes(value: var) [@sizeOf(@TypeOf(value))]u8 { +pub fn toBytes(value: anytype) [@sizeOf(@TypeOf(value))]u8 { return asBytes(&value).*; } @@ -2106,7 +2107,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type { ///Given a pointer to an array of bytes, returns a pointer to a value of the specified type /// backed by those bytes, preserving constness. -pub fn bytesAsValue(comptime T: type, bytes: var) BytesAsValueReturnType(T, @TypeOf(bytes)) { +pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) { return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes); } @@ -2149,7 +2150,7 @@ test "bytesAsValue" { ///Given a pointer to an array of bytes, returns a value of the specified type backed by a /// copy of those bytes. 
-pub fn bytesToValue(comptime T: type, bytes: var) T { +pub fn bytesToValue(comptime T: type, bytes: anytype) T { return bytesAsValue(T, bytes).*; } test "bytesToValue" { @@ -2177,7 +2178,7 @@ fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type { return if (trait.isConstPtr(bytesType)) []align(alignment) const T else []align(alignment) T; } -pub fn bytesAsSlice(comptime T: type, bytes: var) BytesAsSliceReturnType(T, @TypeOf(bytes)) { +pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, @TypeOf(bytes)) { // let's not give an undefined pointer to @ptrCast // it may be equal to zero and fail a null check if (bytes.len == 0) { @@ -2256,7 +2257,7 @@ fn SliceAsBytesReturnType(comptime sliceType: type) type { return if (trait.isConstPtr(sliceType)) []align(alignment) const u8 else []align(alignment) u8; } -pub fn sliceAsBytes(slice: var) SliceAsBytesReturnType(@TypeOf(slice)) { +pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) { const Slice = @TypeOf(slice); // let's not give an undefined pointer to @ptrCast diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 6c10941aa7..2827cfecf4 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -9,7 +9,7 @@ pub const trait = @import("meta/trait.zig"); const TypeInfo = builtin.TypeInfo; -pub fn tagName(v: var) []const u8 { +pub fn tagName(v: anytype) []const u8 { const T = @TypeOf(v); switch (@typeInfo(T)) { .ErrorSet => return @errorName(v), @@ -430,7 +430,7 @@ test "std.meta.TagType" { } ///Returns the active tag of a tagged union -pub fn activeTag(u: var) @TagType(@TypeOf(u)) { +pub fn activeTag(u: anytype) @TagType(@TypeOf(u)) { const T = @TypeOf(u); return @as(@TagType(T), u); } @@ -480,7 +480,7 @@ test "std.meta.TagPayloadType" { /// Compares two of any type for equality. Containers are compared on a field-by-field basis, /// where possible. Pointers are not followed. -pub fn eql(a: var, b: @TypeOf(a)) bool { +pub fn eql(a: anytype, b: @TypeOf(a)) bool { const T = @TypeOf(a); switch (@typeInfo(T)) { @@ -627,7 +627,7 @@ test "intToEnum with error return" { pub const IntToEnumError = error{InvalidEnumTag}; -pub fn intToEnum(comptime Tag: type, tag_int: var) IntToEnumError!Tag { +pub fn intToEnum(comptime Tag: type, tag_int: anytype) IntToEnumError!Tag { inline for (@typeInfo(Tag).Enum.fields) |f| { const this_tag_value = @field(Tag, f.name); if (tag_int == @enumToInt(this_tag_value)) { @@ -696,7 +696,7 @@ pub fn Vector(comptime len: u32, comptime child: type) type { /// Given a type and value, cast the value to the type as c would. /// This is for translate-c and is not intended for general use. 
-pub fn cast(comptime DestType: type, target: var) DestType { +pub fn cast(comptime DestType: type, target: anytype) DestType { const TargetType = @TypeOf(target); switch (@typeInfo(DestType)) { .Pointer => { diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 11aa8457ee..5cea0ecb9a 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -9,7 +9,7 @@ const meta = @import("../meta.zig"); pub const TraitFn = fn (type) bool; -pub fn multiTrait(comptime traits: var) TraitFn { +pub fn multiTrait(comptime traits: anytype) TraitFn { const Closure = struct { pub fn trait(comptime T: type) bool { inline for (traits) |t| @@ -342,7 +342,7 @@ test "std.meta.trait.isContainer" { testing.expect(!isContainer(u8)); } -pub fn hasDecls(comptime T: type, comptime names: var) bool { +pub fn hasDecls(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!@hasDecl(T, name)) return false; @@ -368,7 +368,7 @@ test "std.meta.trait.hasDecls" { testing.expect(!hasDecls(TestStruct2, tuple)); } -pub fn hasFields(comptime T: type, comptime names: var) bool { +pub fn hasFields(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!@hasField(T, name)) return false; @@ -394,7 +394,7 @@ test "std.meta.trait.hasFields" { testing.expect(!hasFields(TestStruct2, .{ "a", "b", "useless" })); } -pub fn hasFunctions(comptime T: type, comptime names: var) bool { +pub fn hasFunctions(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!hasFn(name)(T)) return false; diff --git a/lib/std/net.zig b/lib/std/net.zig index a9dcf53f92..71bab383fa 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -427,7 +427,7 @@ pub const Address = extern union { self: Address, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { switch (self.any.family) { os.AF_INET => { @@ -1404,8 +1404,8 @@ fn resMSendRc( fn dnsParse( r: []const u8, - ctx: var, - comptime callback: var, + ctx: anytype, + comptime callback: anytype, ) !void { // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. 
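The rename running through these hunks is purely syntactic: a parameter spelled `anytype` (previously `var`) still has its type inferred independently at each call site, and `@TypeOf` can still name that type in the return position. A minimal standalone sketch of the new spelling (illustrative only, not part of this patch; `double` is a made-up function):

const std = @import("std");

// Hypothetical example, not part of the patch: the argument type is
// inferred per call site, and `@TypeOf(x)` names it in the return type.
fn double(x: anytype) @TypeOf(x) {
    return x + x;
}

test "anytype parameter" {
    std.testing.expect(double(@as(u32, 21)) == 42);
    std.testing.expect(double(@as(f64, 1.5)) == 3.0);
}

Call sites are unaffected; only declarations change, from `fn f(x: var)` to `fn f(x: anytype)`.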
diff --git a/lib/std/os.zig b/lib/std/os.zig index 1e1049ae51..dfb47208ca 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -4068,7 +4068,7 @@ pub fn nanosleep(seconds: u64, nanoseconds: u64) void { } pub fn dl_iterate_phdr( - context: var, + context: anytype, comptime Error: type, comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void, ) Error!void { diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig index 1d2f88794d..c037075cd8 100644 --- a/lib/std/os/uefi.zig +++ b/lib/std/os/uefi.zig @@ -28,7 +28,7 @@ pub const Guid = extern struct { self: @This(), comptime f: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) Errors!void { if (f.len == 0) { return std.fmt.format(out_stream, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{ diff --git a/lib/std/progress.zig b/lib/std/progress.zig index d80f8c4423..b81e81aa2c 100644 --- a/lib/std/progress.zig +++ b/lib/std/progress.zig @@ -224,7 +224,7 @@ pub const Progress = struct { self.prev_refresh_timestamp = self.timer.read(); } - pub fn log(self: *Progress, comptime format: []const u8, args: var) void { + pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { const file = self.terminal orelse return; self.refresh(); file.outStream().print(format, args) catch { @@ -234,7 +234,7 @@ pub const Progress = struct { self.columns_written = 0; } - fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: var) void { + fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { const amt = written.len; end.* += amt; diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index d087e480f6..d39fe3e239 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -122,7 +122,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type self.* = undefined; } - pub fn at(self: var, i: usize) AtType(@TypeOf(self)) { + pub fn at(self: anytype, i: usize) AtType(@TypeOf(self)) { assert(i < self.len); return self.uncheckedAt(i); } @@ -241,7 +241,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } } - pub fn uncheckedAt(self: var, index: usize) AtType(@TypeOf(self)) { + pub fn uncheckedAt(self: anytype, index: usize) AtType(@TypeOf(self)) { if (index < prealloc_item_count) { return &self.prealloc_segment[index]; } diff --git a/lib/std/sort.zig b/lib/std/sort.zig index cb6162e9b0..464054e4a5 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -9,7 +9,7 @@ pub fn binarySearch( comptime T: type, key: T, items: []const T, - context: var, + context: anytype, comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order, ) ?usize { var left: usize = 0; @@ -76,7 +76,7 @@ test "binarySearch" { pub fn insertionSort( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { var i: usize = 1; @@ -182,7 +182,7 @@ const Pull = struct { pub fn sort( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c @@ -813,7 +813,7 @@ fn mergeInPlace( items: []T, A_arg: Range, B_arg: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, 
) void { if (A_arg.length() == 0 or B_arg.length() == 0) return; @@ -862,7 +862,7 @@ fn mergeInternal( items: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, buffer: Range, ) void { @@ -906,7 +906,7 @@ fn findFirstForward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -928,7 +928,7 @@ fn findFirstBackward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -950,7 +950,7 @@ fn findLastForward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -972,7 +972,7 @@ fn findLastBackward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -994,7 +994,7 @@ fn binaryFirst( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; @@ -1017,7 +1017,7 @@ fn binaryLast( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; @@ -1040,7 +1040,7 @@ fn mergeInto( from: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, into: []T, ) void { @@ -1078,7 +1078,7 @@ fn mergeExternal( items: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, cache: []T, ) void { @@ -1112,7 +1112,7 @@ fn mergeExternal( fn swap( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, order: *[8]u8, x: usize, @@ -1358,7 +1358,7 @@ fn fuzzTest(rng: *std.rand.Random) !void { pub fn argMin( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { @@ -1390,7 +1390,7 @@ test "argMin" { pub fn min( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMin(T, items, context, lessThan) orelse return null; @@ -1410,7 +1410,7 @@ test "min" { pub fn argMax( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { @@ -1442,7 +1442,7 @@ test "argMax" { pub fn max( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMax(T, items, context, lessThan) orelse return null; @@ -1462,7 +1462,7 @@ test "max" { pub fn isSorted( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) bool { var i: usize = 1; diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig index 1c88f98e6e..83c181e841 100644 --- a/lib/std/special/build_runner.zig +++ b/lib/std/special/build_runner.zig @@ -135,7 +135,7 @@ fn runBuild(builder: *Builder) anyerror!void { } } -fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { +fn 
usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void { // run the build script to collect the options if (!already_ran_build) { builder.setInstallPrefix(null); @@ -202,7 +202,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { ); } -fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) void { +fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void { usage(builder, already_ran_build, out_stream) catch {}; process.exit(1); } diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index fd8c068f05..301457dde0 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -79,9 +79,9 @@ pub fn log( comptime message_level: std.log.Level, comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) { - std.debug.print("[{}] ({}): " ++ format, .{@tagName(scope), @tagName(message_level)} ++ args); + std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args); } } diff --git a/lib/std/target.zig b/lib/std/target.zig index 110b7a088f..0b95e8f75a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -108,23 +108,16 @@ pub const Target = struct { self: WindowsVersion, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { - if (fmt.len > 0 and fmt[0] == 's') { - if ( - @enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1) - ) { + if (fmt.len > 0 and fmt[0] == 's') { + if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) { try std.fmt.format(out_stream, ".{}", .{@tagName(self)}); } else { - try std.fmt.format(out_stream, - "@intToEnum(Target.Os.WindowsVersion, {})", - .{ @enumToInt(self) } - ); + try std.fmt.format(out_stream, "@intToEnum(Target.Os.WindowsVersion, {})", .{@enumToInt(self)}); } } else { - if ( - @enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1) - ) { + if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) { try std.fmt.format(out_stream, "WindowsVersion.{}", .{@tagName(self)}); } else { try std.fmt.format(out_stream, "WindowsVersion(", .{@typeName(@This())}); @@ -1189,7 +1182,7 @@ pub const Target = struct { pub fn standardDynamicLinkerPath(self: Target) DynamicLinker { var result: DynamicLinker = .{}; const S = struct { - fn print(r: *DynamicLinker, comptime fmt: []const u8, args: var) DynamicLinker { + fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker { r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1); return r.*; } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index bdaf759d62..44c221d76a 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -19,7 +19,7 @@ pub var log_level = std.log.Level.warn; /// This function is intended to be used only in tests. It prints diagnostics to stderr /// and then aborts when actual_error_union is not expected_error. 
-pub fn expectError(expected_error: anyerror, actual_error_union: var) void { +pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void { if (actual_error_union) |actual_payload| { std.debug.panic("expected error.{}, found {}", .{ @errorName(expected_error), actual_payload }); } else |actual_error| { @@ -36,7 +36,7 @@ pub fn expectError(expected_error: anyerror, actual_error_union: var) void { /// equal, prints diagnostics to stderr to show exactly how they are not equal, /// then aborts. /// The types must match exactly. -pub fn expectEqual(expected: var, actual: @TypeOf(expected)) void { +pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void { switch (@typeInfo(@TypeOf(actual))) { .NoReturn, .BoundFn, diff --git a/lib/std/thread.zig b/lib/std/thread.zig index d07c41c5b0..3d20f54558 100644 --- a/lib/std/thread.zig +++ b/lib/std/thread.zig @@ -143,7 +143,7 @@ pub const Thread = struct { /// fn startFn(@TypeOf(context)) T /// where T is u8, noreturn, void, or !void /// caller must call wait on the returned thread - pub fn spawn(context: var, comptime startFn: var) SpawnError!*Thread { + pub fn spawn(context: anytype, comptime startFn: anytype) SpawnError!*Thread { if (builtin.single_threaded) @compileError("cannot spawn thread when building in single-threaded mode"); // TODO compile-time call graph analysis to determine stack upper bound // https://github.com/ziglang/zig/issues/157 diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig index e95a8855af..03153f541a 100644 --- a/lib/std/zig/ast.zig +++ b/lib/std/zig/ast.zig @@ -29,7 +29,7 @@ pub const Tree = struct { self.arena.promote(self.gpa).deinit(); } - pub fn renderError(self: *Tree, parse_error: *const Error, stream: var) !void { + pub fn renderError(self: *Tree, parse_error: *const Error, stream: anytype) !void { return parse_error.render(self.token_ids, stream); } @@ -167,7 +167,7 @@ pub const Error = union(enum) { DeclBetweenFields: DeclBetweenFields, InvalidAnd: InvalidAnd, - pub fn render(self: *const Error, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const Error, tokens: []const Token.Id, stream: anytype) !void { switch (self.*) { .InvalidToken => |*x| return x.render(tokens, stream), .ExpectedContainerMembers => |*x| return x.render(tokens, stream), @@ -322,7 +322,7 @@ pub const Error = union(enum) { pub const ExpectedCall = struct { node: *Node, - pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: anytype) !void { return stream.print("expected " ++ @tagName(Node.Id.Call) ++ ", found {}", .{ @tagName(self.node.id), }); @@ -332,7 +332,7 @@ pub const Error = union(enum) { pub const ExpectedCallOrFnProto = struct { node: *Node, - pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: anytype) !void { return stream.print("expected " ++ @tagName(Node.Id.Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", .{@tagName(self.node.id)}); } @@ -342,7 +342,7 @@ pub const Error = union(enum) { token: TokenIndex, expected_id: Token.Id, - pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: anytype) !void { const found_token = tokens[self.token]; switch (found_token) { .Invalid => { @@ -360,7 +360,7 @@ pub const Error = 
union(enum) { token: TokenIndex, end_id: Token.Id, - pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: anytype) !void { const actual_token = tokens[self.token]; return stream.print("expected ',' or '{}', found '{}'", .{ self.end_id.symbol(), @@ -375,7 +375,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void { const actual_token = tokens[self.token]; return stream.print(msg, .{actual_token.symbol()}); } @@ -388,7 +388,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void { return stream.writeAll(msg); } }; diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index 4f8eff3c68..29b60c0cee 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -2955,7 +2955,7 @@ const Parser = struct { const NodeParseFn = fn (p: *Parser) Error!?*Node; - fn ListParseFn(comptime E: type, comptime nodeParseFn: var) ParseFn([]E) { + fn ListParseFn(comptime E: type, comptime nodeParseFn: anytype) ParseFn([]E) { return struct { pub fn parse(p: *Parser) ![]E { var list = std.ArrayList(E).init(p.gpa); diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig index cc6030ad15..f7ceee16ac 100644 --- a/lib/std/zig/string_literal.zig +++ b/lib/std/zig/string_literal.zig @@ -125,7 +125,7 @@ test "parse" { } /// Writes a Zig-syntax escaped string literal to the stream. Includes the double quotes. 
-pub fn render(utf8: []const u8, out_stream: var) !void { +pub fn render(utf8: []const u8, out_stream: anytype) !void { try out_stream.writeByte('"'); for (utf8) |byte| switch (byte) { '\n' => try out_stream.writeAll("\\n"), diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 898d376cc7..af494efbab 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -130,7 +130,7 @@ pub const NativePaths = struct { return self.appendArray(&self.include_dirs, s); } - pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.include_dirs.allocator, fmt, args); errdefer self.include_dirs.allocator.free(item); try self.include_dirs.append(item); @@ -140,7 +140,7 @@ pub const NativePaths = struct { return self.appendArray(&self.lib_dirs, s); } - pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.lib_dirs.allocator, fmt, args); errdefer self.lib_dirs.allocator.free(item); try self.lib_dirs.append(item); @@ -150,7 +150,7 @@ pub const NativePaths = struct { return self.appendArray(&self.warnings, s); } - pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.warnings.allocator, fmt, args); errdefer self.warnings.allocator.free(item); try self.warnings.append(item); @@ -887,7 +887,7 @@ pub const NativeTargetInfo = struct { abi: Target.Abi, }; - pub fn elfInt(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) { + pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(@TypeOf(int_64), int_64); diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index a6508943df..b0678ea665 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -3575,7 +3575,7 @@ fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *I return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: var) InnerError { +fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError { @setCold(true); const err_msg = try ErrorMsg.create(self.gpa, src, format, args); return self.failWithOwnedErrorMsg(scope, src, err_msg); @@ -3586,7 +3586,7 @@ fn failTok( scope: *Scope, token_index: ast.TokenIndex, comptime format: []const u8, - args: var, + args: anytype, ) InnerError { @setCold(true); const src = scope.tree().token_locs[token_index].start; @@ -3598,7 +3598,7 @@ fn failNode( scope: *Scope, ast_node: *ast.Node, comptime format: []const u8, - args: var, + args: anytype, ) InnerError { @setCold(true); const src = scope.tree().token_locs[ast_node.firstToken()].start; @@ -3662,7 +3662,7 @@ pub const ErrorMsg = struct { byte_offset: usize, msg: []const u8, - pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !*ErrorMsg { + pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg { const self = try gpa.create(ErrorMsg); 
errdefer gpa.destroy(self); self.* = try init(gpa, byte_offset, format, args); @@ -3675,7 +3675,7 @@ pub const ErrorMsg = struct { gpa.destroy(self); } - pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !ErrorMsg { + pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg { return ErrorMsg{ .byte_offset = byte_offset, .msg = try std.fmt.allocPrint(gpa, format, args), diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 7ed84d540c..ba91e0726f 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -230,7 +230,7 @@ pub fn generateSymbol( } } -const InnerError = error { +const InnerError = error{ OutOfMemory, CodegenFail, }; @@ -673,9 +673,9 @@ const Function = struct { try self.genX8664BinMathCode(inst.base.src, dst_mcv, src_mcv, 7, 0x38); const info = inst.args.lhs.ty.intInfo(self.target.*); if (info.signed) { - return MCValue{.compare_flags_signed = inst.args.op}; + return MCValue{ .compare_flags_signed = inst.args.op }; } else { - return MCValue{.compare_flags_unsigned = inst.args.op}; + return MCValue{ .compare_flags_unsigned = inst.args.op }; } }, else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}), @@ -721,7 +721,7 @@ const Function = struct { } fn genX86CondBr(self: *Function, inst: *ir.Inst.CondBr, opcode: u8, comptime arch: std.Target.Cpu.Arch) !MCValue { - self.code.appendSliceAssumeCapacity(&[_]u8{0x0f, opcode}); + self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); const reloc = Reloc{ .rel32 = self.code.items.len }; self.code.items.len += 4; try self.genBody(inst.args.true_body, arch); @@ -1081,10 +1081,12 @@ const Function = struct { switch (mcv) { .immediate => |imm| { // This immediate is unsigned. 
- const U = @Type(.{ .Int = .{ - .bits = ti.bits - @boolToInt(ti.is_signed), - .is_signed = false, - }}); + const U = @Type(.{ + .Int = .{ + .bits = ti.bits - @boolToInt(ti.is_signed), + .is_signed = false, + }, + }); if (imm >= std.math.maxInt(U)) { return self.copyToNewRegister(inst); } @@ -1094,7 +1096,6 @@ const Function = struct { return mcv; } - fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -1121,7 +1122,7 @@ const Function = struct { } } - fn fail(self: *Function, src: usize, comptime format: []const u8, args: var) error{ CodegenFail, OutOfMemory } { + fn fail(self: *Function, src: usize, comptime format: []const u8, args: anytype) error{ CodegenFail, OutOfMemory } { @setCold(true); assert(self.err_msg == null); self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args); diff --git a/src-self-hosted/dep_tokenizer.zig b/src-self-hosted/dep_tokenizer.zig index cad12834a7..20324cbf0c 100644 --- a/src-self-hosted/dep_tokenizer.zig +++ b/src-self-hosted/dep_tokenizer.zig @@ -299,12 +299,12 @@ pub const Tokenizer = struct { return null; } - fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: var) Error { + fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: anytype) Error { self.error_text = try std.fmt.allocPrintZ(&self.arena.allocator, fmt, args); return Error.InvalidInput; } - fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: var) Error { + fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: anytype) Error { var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); try buffer.outStream().print(fmt, args); try buffer.appendSlice(" '"); @@ -316,7 +316,7 @@ pub const Tokenizer = struct { return Error.InvalidInput; } - fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: var) Error { + fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: anytype) Error { var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); try buffer.appendSlice("illegal char "); try printUnderstandableChar(&buffer, char); @@ -883,7 +883,7 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void { testing.expect(false); } -fn printSection(out: var, label: []const u8, bytes: []const u8) !void { +fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void { try printLabel(out, label, bytes); try hexDump(out, bytes); try printRuler(out); @@ -891,7 +891,7 @@ fn printSection(out: var, label: []const u8, bytes: []const u8) !void { try out.write("\n"); } -fn printLabel(out: var, label: []const u8, bytes: []const u8) !void { +fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void { var buf: [80]u8 = undefined; var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len }); try out.write(text); @@ -903,7 +903,7 @@ fn printLabel(out: var, label: []const u8, bytes: []const u8) !void { try out.write("\n"); } -fn printRuler(out: var) !void { +fn printRuler(out: anytype) !void { var i: usize = 0; const end = 79; while (i < 79) : (i += 1) { @@ -912,7 +912,7 @@ fn printRuler(out: var) !void { try out.write("\n"); } -fn hexDump(out: var, bytes: []const u8) !void { +fn hexDump(out: anytype, bytes: []const u8) !void { const n16 = bytes.len >> 4; var line: usize = 0; var 
offset: usize = 0; @@ -959,7 +959,7 @@ fn hexDump(out: var, bytes: []const u8) !void { try out.write("\n"); } -fn hexDump16(out: var, offset: usize, bytes: []const u8) !void { +fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.write(":"); try out.write(" "); @@ -977,19 +977,19 @@ fn hexDump16(out: var, offset: usize, bytes: []const u8) !void { try out.write("|\n"); } -fn printDecValue(out: var, value: u64, width: u8) !void { +fn printDecValue(out: anytype, value: u64, width: u8) !void { var buffer: [20]u8 = undefined; const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, width); try out.write(buffer[0..len]); } -fn printHexValue(out: var, value: u64, width: u8) !void { +fn printHexValue(out: anytype, value: u64, width: u8) !void { var buffer: [16]u8 = undefined; const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, width); try out.write(buffer[0..len]); } -fn printCharValues(out: var, bytes: []const u8) !void { +fn printCharValues(out: anytype, bytes: []const u8) !void { for (bytes) |b| { try out.write(&[_]u8{printable_char_tab[b]}); } @@ -1020,13 +1020,13 @@ comptime { // output: must be a function that takes a `self` idiom parameter // and a bytes parameter // context: must be that self -fn makeOutput(comptime output: var, context: var) Output(output, @TypeOf(context)) { +fn makeOutput(comptime output: anytype, context: anytype) Output(output, @TypeOf(context)) { return Output(output, @TypeOf(context)){ .context = context, }; } -fn Output(comptime output_func: var, comptime Context: type) type { +fn Output(comptime output_func: anytype, comptime Context: type) type { return struct { context: Context, diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 1de7c626ea..094b877b53 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -13,7 +13,7 @@ const codegen = @import("codegen.zig"); pub const Inst = struct { tag: Tag, /// Each bit represents the index of an `Inst` parameter in the `args` field. - /// If a bit is set, it marks the end of the lifetime of the corresponding + /// If a bit is set, it marks the end of the lifetime of the corresponding /// instruction parameter. For example, 0b000_00101 means that the first and /// third `Inst` parameters' lifetimes end after this instruction, and will /// not have any more following references. 
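The `deaths` field documented in the comment above is a plain bit-set: bit i set means operand i of the instruction is not referenced again afterwards. A hypothetical helper makes the layout concrete (`operandDies` is illustrative only, and the `u8` width is an assumption taken from the 0b000_00101 example in the comment):

const std = @import("std");

// Hypothetical example, not part of the patch: bit `index` of `deaths`
// set means the operand at `index` dies at this instruction.
fn operandDies(deaths: u8, index: u3) bool {
    return (deaths >> index) & 1 != 0;
}

test "deaths bit-set" {
    const deaths: u8 = 0b0000_0101; // first and third operands die here
    std.testing.expect(operandDies(deaths, 0));
    std.testing.expect(!operandDies(deaths, 1));
    std.testing.expect(operandDies(deaths, 2));
}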
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig index dfc0f1235a..65c6c8c16d 100644 --- a/src-self-hosted/libc_installation.zig +++ b/src-self-hosted/libc_installation.zig @@ -37,7 +37,7 @@ pub const LibCInstallation = struct { pub fn parse( allocator: *Allocator, libc_file: []const u8, - stderr: var, + stderr: anytype, ) !LibCInstallation { var self: LibCInstallation = .{}; @@ -115,7 +115,7 @@ pub const LibCInstallation = struct { return self; } - pub fn render(self: LibCInstallation, out: var) !void { + pub fn render(self: LibCInstallation, out: anytype) !void { @setEvalBranchQuota(4000); const include_dir = self.include_dir orelse ""; const sys_include_dir = self.sys_include_dir orelse ""; diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index 2fb14ab690..b664d93353 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -244,7 +244,7 @@ pub const File = struct { need_noreturn: bool = false, error_msg: *Module.ErrorMsg = undefined, - pub fn fail(self: *C, src: usize, comptime format: []const u8, args: var) !void { + pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) !void { self.error_msg = try Module.ErrorMsg.create(self.allocator, src, format, args); return error.CGenFailure; } @@ -1167,10 +1167,10 @@ pub const File = struct { try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len); if (self.local_symbol_free_list.popOrNull()) |i| { - std.log.debug(.link, "reusing symbol index {} for {}\n", .{i, decl.name}); + std.log.debug(.link, "reusing symbol index {} for {}\n", .{ i, decl.name }); decl.link.local_sym_index = i; } else { - std.log.debug(.link, "allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name}); + std.log.debug(.link, "allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name }); decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len); _ = self.local_symbols.addOneAssumeCapacity(); } @@ -1657,7 +1657,7 @@ fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !Fil } /// Saturating multiplication -fn satMul(a: var, b: var) @TypeOf(a, b) { +fn satMul(a: anytype, b: anytype) @TypeOf(a, b) { const T = @TypeOf(a, b); return std.math.mul(T, a, b) catch std.math.maxInt(T); } diff --git a/src-self-hosted/liveness.zig b/src-self-hosted/liveness.zig index 28eb2145c7..797a55a80c 100644 --- a/src-self-hosted/liveness.zig +++ b/src-self-hosted/liveness.zig @@ -135,5 +135,5 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void } } - std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{inst.base.tag, inst.base.deaths}); + std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ inst.base.tag, inst.base.deaths }); } diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index ee60b600a4..c0ac42c845 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -42,7 +42,7 @@ pub fn log( comptime level: std.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(level) > @enumToInt(std.log.level)) return; diff --git a/src-self-hosted/print_targets.zig b/src-self-hosted/print_targets.zig index f84a4a2cbc..34eda71ccf 100644 --- a/src-self-hosted/print_targets.zig +++ b/src-self-hosted/print_targets.zig @@ -62,7 +62,7 @@ pub fn cmdTargets( allocator: *Allocator, args: []const []const u8, /// Output stream - stdout: var, + stdout: anytype, native_target: 
Target, ) !void { const available_glibcs = blk: { diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig index 261cef37b5..396ee09ef9 100644 --- a/src-self-hosted/translate_c.zig +++ b/src-self-hosted/translate_c.zig @@ -1117,7 +1117,7 @@ fn transEnumDecl(c: *Context, enum_decl: *const ZigClangEnumDecl) Error!?*ast.No return transCreateNodeIdentifier(c, name); } -fn createAlias(c: *Context, alias: var) !void { +fn createAlias(c: *Context, alias: anytype) !void { const node = try transCreateNodeVarDecl(c, true, true, alias.alias); node.eq_token = try appendToken(c, .Equal, "="); node.init_node = try transCreateNodeIdentifier(c, alias.name); @@ -2161,7 +2161,7 @@ fn transCreateNodeArrayType( rp: RestorePoint, source_loc: ZigClangSourceLocation, ty: *const ZigClangType, - len: var, + len: anytype, ) TransError!*ast.Node { var node = try transCreateNodePrefixOp( rp.c, @@ -4187,7 +4187,7 @@ fn transCreateNodeBoolLiteral(c: *Context, value: bool) !*ast.Node { return &node.base; } -fn transCreateNodeInt(c: *Context, int: var) !*ast.Node { +fn transCreateNodeInt(c: *Context, int: anytype) !*ast.Node { const token = try appendTokenFmt(c, .IntegerLiteral, "{}", .{int}); const node = try c.arena.create(ast.Node.IntegerLiteral); node.* = .{ @@ -4196,7 +4196,7 @@ fn transCreateNodeInt(c: *Context, int: var) !*ast.Node { return &node.base; } -fn transCreateNodeFloat(c: *Context, int: var) !*ast.Node { +fn transCreateNodeFloat(c: *Context, int: anytype) !*ast.Node { const token = try appendTokenFmt(c, .FloatLiteral, "{}", .{int}); const node = try c.arena.create(ast.Node.FloatLiteral); node.* = .{ @@ -4907,22 +4907,22 @@ fn finishTransFnProto( fn revertAndWarn( rp: RestorePoint, - err: var, + err: anytype, source_loc: ZigClangSourceLocation, comptime format: []const u8, - args: var, + args: anytype, ) (@TypeOf(err) || error{OutOfMemory}) { rp.activate(); try emitWarning(rp.c, source_loc, format, args); return err; } -fn emitWarning(c: *Context, loc: ZigClangSourceLocation, comptime format: []const u8, args: var) !void { +fn emitWarning(c: *Context, loc: ZigClangSourceLocation, comptime format: []const u8, args: anytype) !void { const args_prefix = .{c.locStr(loc)}; _ = try appendTokenFmt(c, .LineComment, "// {}: warning: " ++ format, args_prefix ++ args); } -pub fn failDecl(c: *Context, loc: ZigClangSourceLocation, name: []const u8, comptime format: []const u8, args: var) !void { +pub fn failDecl(c: *Context, loc: ZigClangSourceLocation, name: []const u8, comptime format: []const u8, args: anytype) !void { // pub const name = @compileError(msg); const pub_tok = try appendToken(c, .Keyword_pub, "pub"); const const_tok = try appendToken(c, .Keyword_const, "const"); @@ -4973,7 +4973,7 @@ fn appendToken(c: *Context, token_id: Token.Id, bytes: []const u8) !ast.TokenInd return appendTokenFmt(c, token_id, "{}", .{bytes}); } -fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: var) !ast.TokenIndex { +fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: anytype) !ast.TokenIndex { assert(token_id != .Invalid); try c.token_ids.ensureCapacity(c.gpa, c.token_ids.items.len + 1); diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index caacf4e7fc..82c7cfa607 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -277,7 +277,7 @@ pub const Type = extern union { self: Type, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) 
@TypeOf(out_stream).Error!void { comptime assert(fmt.len == 0); var ty = self; @@ -591,7 +591,6 @@ pub const Type = extern union { .anyerror => return 2, // TODO revisit this when we have the concept of the error tag type - .int_signed, .int_unsigned => { const bits: u16 = if (self.cast(Payload.IntSigned)) |pl| pl.bits diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig index 6509ee52f6..c1e9a38bd1 100644 --- a/src-self-hosted/value.zig +++ b/src-self-hosted/value.zig @@ -227,7 +227,7 @@ pub const Value = extern union { self: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { comptime assert(fmt.len == 0); var val = self; diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig index 2a4db02c19..8faf248636 100644 --- a/src-self-hosted/zir.zig +++ b/src-self-hosted/zir.zig @@ -655,7 +655,7 @@ pub const Module = struct { /// The allocator is used for temporary storage, but this function always returns /// with no resources allocated. - pub fn writeToStream(self: Module, allocator: *Allocator, stream: var) !void { + pub fn writeToStream(self: Module, allocator: *Allocator, stream: anytype) !void { var write = Writer{ .module = &self, .inst_table = InstPtrTable.init(allocator), @@ -686,7 +686,6 @@ pub const Module = struct { try stream.writeByte('\n'); } } - }; const InstPtrTable = std.AutoHashMap(*Inst, struct { inst: *Inst, index: ?usize, name: []const u8 }); @@ -700,7 +699,7 @@ const Writer = struct { fn writeInstToStream( self: *Writer, - stream: var, + stream: anytype, inst: *Inst, ) (@TypeOf(stream).Error || error{OutOfMemory})!void { // TODO I tried implementing this with an inline for loop and hit a compiler bug @@ -746,7 +745,7 @@ const Writer = struct { fn writeInstToStreamGeneric( self: *Writer, - stream: var, + stream: anytype, comptime inst_tag: Inst.Tag, base: *Inst, ) (@TypeOf(stream).Error || error{OutOfMemory})!void { @@ -783,7 +782,7 @@ const Writer = struct { try stream.writeByte(')'); } - fn writeParamToStream(self: *Writer, stream: var, param: var) !void { + fn writeParamToStream(self: *Writer, stream: anytype, param: anytype) !void { if (@typeInfo(@TypeOf(param)) == .Enum) { return stream.writeAll(@tagName(param)); } @@ -829,7 +828,7 @@ const Writer = struct { } } - fn writeInstParamToStream(self: *Writer, stream: var, inst: *Inst) !void { + fn writeInstParamToStream(self: *Writer, stream: anytype, inst: *Inst) !void { if (self.inst_table.get(inst)) |info| { if (info.index) |i| { try stream.print("%{}", .{info.index}); @@ -1062,7 +1061,7 @@ const Parser = struct { } } - fn fail(self: *Parser, comptime format: []const u8, args: var) InnerError { + fn fail(self: *Parser, comptime format: []const u8, args: anytype) InnerError { @setCold(true); self.error_msg = ErrorMsg{ .byte_offset = self.i, diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig index 4214ed84d2..807e4c6275 100644 --- a/test/stage1/behavior/async_fn.zig +++ b/test/stage1/behavior/async_fn.zig @@ -1016,7 +1016,7 @@ test "@asyncCall using the result location inside the frame" { test "@TypeOf an async function call of generic fn with error union type" { const S = struct { - fn func(comptime x: var) anyerror!i32 { + fn func(comptime x: anytype) anyerror!i32 { const T = @TypeOf(async func(x)); comptime expect(T == @TypeOf(@frame()).Child); return undefined; @@ -1032,7 +1032,7 @@ test "using @TypeOf on a generic function call" { var buf: [100]u8 align(16) = undefined; - fn amain(x: var) void { + fn 
amain(x: anytype) void { if (x == 0) { global_ok = true; return; @@ -1057,7 +1057,7 @@ test "recursive call of await @asyncCall with struct return type" { var buf: [100]u8 align(16) = undefined; - fn amain(x: var) Foo { + fn amain(x: anytype) Foo { if (x == 0) { global_ok = true; return Foo{ .x = 1, .y = 2, .z = 3 }; @@ -1336,7 +1336,7 @@ test "async function passed 0-bit arg after non-0-bit arg" { bar(1, .{}) catch unreachable; } - fn bar(x: i32, args: var) anyerror!void { + fn bar(x: i32, args: anytype) anyerror!void { global_frame = @frame(); suspend; global_int = x; @@ -1357,7 +1357,7 @@ test "async function passed align(16) arg after align(8) arg" { bar(10, .{a}) catch unreachable; } - fn bar(x: u64, args: var) anyerror!void { + fn bar(x: u64, args: anytype) anyerror!void { expect(x == 10); global_frame = @frame(); suspend; diff --git a/test/stage1/behavior/bitcast.zig b/test/stage1/behavior/bitcast.zig index 009f4544ba..2a86044dc1 100644 --- a/test/stage1/behavior/bitcast.zig +++ b/test/stage1/behavior/bitcast.zig @@ -171,7 +171,7 @@ test "nested bitcast" { test "bitcast passed as tuple element" { const S = struct { - fn foo(args: var) void { + fn foo(args: anytype) void { comptime expect(@TypeOf(args[0]) == f32); expect(args[0] == 12.34); } @@ -181,7 +181,7 @@ test "bitcast passed as tuple element" { test "triple level result location with bitcast sandwich passed as tuple element" { const S = struct { - fn foo(args: var) void { + fn foo(args: anytype) void { comptime expect(@TypeOf(args[0]) == f64); expect(args[0] > 12.33 and args[0] < 12.35); } diff --git a/test/stage1/behavior/bugs/2114.zig b/test/stage1/behavior/bugs/2114.zig index ab32a22cf3..1034a256d3 100644 --- a/test/stage1/behavior/bugs/2114.zig +++ b/test/stage1/behavior/bugs/2114.zig @@ -2,7 +2,7 @@ const std = @import("std"); const expect = std.testing.expect; const math = std.math; -fn ctz(x: var) usize { +fn ctz(x: anytype) usize { return @ctz(@TypeOf(x), x); } diff --git a/test/stage1/behavior/bugs/3742.zig b/test/stage1/behavior/bugs/3742.zig index f09127a66f..bf6e1f5207 100644 --- a/test/stage1/behavior/bugs/3742.zig +++ b/test/stage1/behavior/bugs/3742.zig @@ -23,7 +23,7 @@ pub fn isCommand(comptime T: type) bool { } pub const ArgSerializer = struct { - pub fn serializeCommand(command: var) void { + pub fn serializeCommand(command: anytype) void { const CmdT = @TypeOf(command); if (comptime isCommand(CmdT)) { diff --git a/test/stage1/behavior/bugs/4328.zig b/test/stage1/behavior/bugs/4328.zig index 0196af1748..98ab7bd155 100644 --- a/test/stage1/behavior/bugs/4328.zig +++ b/test/stage1/behavior/bugs/4328.zig @@ -17,11 +17,11 @@ const S = extern struct { test "Extern function calls in @TypeOf" { const Test = struct { - fn test_fn_1(a: var, b: var) @TypeOf(printf("%d %s\n", a, b)) { + fn test_fn_1(a: anytype, b: anytype) @TypeOf(printf("%d %s\n", a, b)) { return 0; } - fn test_fn_2(a: var) @TypeOf((S{ .state = 0 }).s_do_thing(a)) { + fn test_fn_2(a: anytype) @TypeOf((S{ .state = 0 }).s_do_thing(a)) { return 1; } @@ -56,7 +56,7 @@ test "Extern function calls, dereferences and field access in @TypeOf" { return .{ .dummy_field = 0 }; } - fn test_fn_2(a: var) @TypeOf(fopen("test", "r").*.dummy_field) { + fn test_fn_2(a: anytype) @TypeOf(fopen("test", "r").*.dummy_field) { return 255; } @@ -68,4 +68,4 @@ test "Extern function calls, dereferences and field access in @TypeOf" { Test.doTheTest(); comptime Test.doTheTest(); -} \ No newline at end of file +} diff --git a/test/stage1/behavior/bugs/4769_a.zig 
b/test/stage1/behavior/bugs/4769_a.zig index ab0c01417a..8337712ea5 100644 --- a/test/stage1/behavior/bugs/4769_a.zig +++ b/test/stage1/behavior/bugs/4769_a.zig @@ -1 +1 @@ -// \ No newline at end of file +// diff --git a/test/stage1/behavior/bugs/4769_b.zig b/test/stage1/behavior/bugs/4769_b.zig index 23b2513f17..9d0f028e57 100644 --- a/test/stage1/behavior/bugs/4769_b.zig +++ b/test/stage1/behavior/bugs/4769_b.zig @@ -1 +1 @@ -//! \ No newline at end of file +//! diff --git a/test/stage1/behavior/byval_arg_var.zig b/test/stage1/behavior/byval_arg_var.zig index 3794a965c6..ec3d18a532 100644 --- a/test/stage1/behavior/byval_arg_var.zig +++ b/test/stage1/behavior/byval_arg_var.zig @@ -13,11 +13,11 @@ fn start() void { foo("string literal"); } -fn foo(x: var) void { +fn foo(x: anytype) void { bar(x); } -fn bar(x: var) void { +fn bar(x: anytype) void { result = x; } diff --git a/test/stage1/behavior/call.zig b/test/stage1/behavior/call.zig index 40b5be4cd3..4d05a83a39 100644 --- a/test/stage1/behavior/call.zig +++ b/test/stage1/behavior/call.zig @@ -57,7 +57,7 @@ test "tuple parameters" { test "comptime call with bound function as parameter" { const S = struct { - fn ReturnType(func: var) type { + fn ReturnType(func: anytype) type { return switch (@typeInfo(@TypeOf(func))) { .BoundFn => |info| info, else => unreachable, diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig index b6cb86a363..765828f5ce 100644 --- a/test/stage1/behavior/enum.zig +++ b/test/stage1/behavior/enum.zig @@ -208,7 +208,7 @@ test "@tagName non-exhaustive enum" { comptime expect(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B")); } -fn testEnumTagNameBare(n: var) []const u8 { +fn testEnumTagNameBare(n: anytype) []const u8 { return @tagName(n); } diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig index def7fd679d..975e08b04f 100644 --- a/test/stage1/behavior/error.zig +++ b/test/stage1/behavior/error.zig @@ -227,7 +227,7 @@ test "error: Infer error set from literals" { _ = comptime intLiteral("n") catch |err| handleErrors(err); } -fn handleErrors(err: var) noreturn { +fn handleErrors(err: anytype) noreturn { switch (err) { error.T => {}, } diff --git a/test/stage1/behavior/eval.zig b/test/stage1/behavior/eval.zig index 2af34eaf7e..17d5aafe06 100644 --- a/test/stage1/behavior/eval.zig +++ b/test/stage1/behavior/eval.zig @@ -670,10 +670,10 @@ fn loopNTimes(comptime n: usize) void { } test "variable inside inline loop that has different types on different iterations" { - testVarInsideInlineLoop(.{true, @as(u32, 42)}); + testVarInsideInlineLoop(.{ true, @as(u32, 42) }); } -fn testVarInsideInlineLoop(args: var) void { +fn testVarInsideInlineLoop(args: anytype) void { comptime var i = 0; inline while (i < args.len) : (i += 1) { const x = args[i]; @@ -814,17 +814,16 @@ test "two comptime calls with array default initialized to undefined" { dynamic_linker: DynamicLinker = DynamicLinker{}, pub fn parse() void { - var result: CrossTarget = .{ }; + var result: CrossTarget = .{}; result.getCpuArch(); } - pub fn getCpuArch(self: CrossTarget) void { } + pub fn getCpuArch(self: CrossTarget) void {} }; const DynamicLinker = struct { buffer: [255]u8 = undefined, }; - }; comptime { diff --git a/test/stage1/behavior/fn.zig b/test/stage1/behavior/fn.zig index c1e5459378..c9f7477ecf 100644 --- a/test/stage1/behavior/fn.zig +++ b/test/stage1/behavior/fn.zig @@ -104,7 +104,7 @@ test "number literal as an argument" { comptime numberLiteralArg(3); } -fn numberLiteralArg(a: var) void { +fn 
numberLiteralArg(a: anytype) void { expect(a == 3); } @@ -132,7 +132,7 @@ test "pass by non-copying value through var arg" { expect(addPointCoordsVar(Point{ .x = 1, .y = 2 }) == 3); } -fn addPointCoordsVar(pt: var) i32 { +fn addPointCoordsVar(pt: anytype) i32 { comptime expect(@TypeOf(pt) == Point); return pt.x + pt.y; } @@ -267,7 +267,7 @@ test "ability to give comptime types and non comptime types to same parameter" { expect(foo(i32) == 20); } - fn foo(arg: var) i32 { + fn foo(arg: anytype) i32 { if (@typeInfo(@TypeOf(arg)) == .Type and arg == i32) return 20; return 9 + arg; } diff --git a/test/stage1/behavior/generics.zig b/test/stage1/behavior/generics.zig index a5d2f9dabe..6b584e381d 100644 --- a/test/stage1/behavior/generics.zig +++ b/test/stage1/behavior/generics.zig @@ -47,7 +47,7 @@ comptime { expect(max_f64(1.2, 3.4) == 3.4); } -fn max_var(a: var, b: var) @TypeOf(a + b) { +fn max_var(a: anytype, b: anytype) @TypeOf(a + b) { return if (a > b) a else b; } @@ -133,15 +133,15 @@ fn getFirstByte(comptime T: type, mem: []const T) u8 { return getByte(@ptrCast(*const u8, &mem[0])); } -const foos = [_]fn (var) bool{ +const foos = [_]fn (anytype) bool{ foo1, foo2, }; -fn foo1(arg: var) bool { +fn foo1(arg: anytype) bool { return arg; } -fn foo2(arg: var) bool { +fn foo2(arg: anytype) bool { return !arg; } diff --git a/test/stage1/behavior/optional.zig b/test/stage1/behavior/optional.zig index 0003bb86e1..1dc33eb8ea 100644 --- a/test/stage1/behavior/optional.zig +++ b/test/stage1/behavior/optional.zig @@ -67,8 +67,20 @@ fn test_cmp_optional_non_optional() void { // test evaluation is always lexical // ensure that the optional isn't always computed before the non-optional var mutable_state: i32 = 0; - _ = blk1: { mutable_state += 1; break :blk1 @as(?f64, 10.0); } != blk2: { expect(mutable_state == 1); break :blk2 @as(f64, 5.0); }; - _ = blk1: { mutable_state += 1; break :blk1 @as(f64, 10.0); } != blk2: { expect(mutable_state == 2); break :blk2 @as(?f64, 5.0); }; + _ = blk1: { + mutable_state += 1; + break :blk1 @as(?f64, 10.0); + } != blk2: { + expect(mutable_state == 1); + break :blk2 @as(f64, 5.0); + }; + _ = blk1: { + mutable_state += 1; + break :blk1 @as(f64, 10.0); + } != blk2: { + expect(mutable_state == 2); + break :blk2 @as(?f64, 5.0); + }; } test "passing an optional integer as a parameter" { diff --git a/test/stage1/behavior/struct.zig b/test/stage1/behavior/struct.zig index 7b8690d604..2d83bd23bb 100644 --- a/test/stage1/behavior/struct.zig +++ b/test/stage1/behavior/struct.zig @@ -713,7 +713,7 @@ test "packed struct field passed to generic function" { a: u1, }; - fn genericReadPackedField(ptr: var) u5 { + fn genericReadPackedField(ptr: anytype) u5 { return ptr.*; } }; @@ -754,7 +754,7 @@ test "fully anonymous struct" { .s = "hi", }); } - fn dump(args: var) void { + fn dump(args: anytype) void { expect(args.int == 1234); expect(args.float == 12.34); expect(args.b); @@ -771,7 +771,7 @@ test "fully anonymous list literal" { fn doTheTest() void { dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" }); } - fn dump(args: var) void { + fn dump(args: anytype) void { expect(args.@"0" == 1234); expect(args.@"1" == 12.34); expect(args.@"2"); @@ -792,8 +792,8 @@ test "anonymous struct literal assigned to variable" { test "struct with var field" { const Point = struct { - x: var, - y: var, + x: anytype, + y: anytype, }; const pt = Point{ .x = 1, diff --git a/test/stage1/behavior/tuple.zig b/test/stage1/behavior/tuple.zig index 0b2fdfe4e0..299abec3c9 100644 --- a/test/stage1/behavior/tuple.zig 
+++ b/test/stage1/behavior/tuple.zig @@ -42,7 +42,7 @@ test "tuple multiplication" { comptime S.doTheTest(); const T = struct { - fn consume_tuple(tuple: var, len: usize) void { + fn consume_tuple(tuple: anytype, len: usize) void { expect(tuple.len == len); } @@ -82,7 +82,7 @@ test "tuple multiplication" { test "pass tuple to comptime var parameter" { const S = struct { - fn Foo(comptime args: var) void { + fn Foo(comptime args: anytype) void { expect(args[0] == 1); } diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig index 2685a3552e..48263dba6f 100644 --- a/test/stage1/behavior/type_info.zig +++ b/test/stage1/behavior/type_info.zig @@ -385,7 +385,7 @@ test "@typeInfo does not force declarations into existence" { } test "defaut value for a var-typed field" { - const S = struct { x: var }; + const S = struct { x: anytype }; expect(@typeInfo(S).Struct.fields[0].default_value == null); } diff --git a/test/stage1/behavior/union.zig b/test/stage1/behavior/union.zig index 8555e712dd..da898347b9 100644 --- a/test/stage1/behavior/union.zig +++ b/test/stage1/behavior/union.zig @@ -296,7 +296,7 @@ const TaggedUnionWithAVoid = union(enum) { B: i32, }; -fn testTaggedUnionInit(x: var) bool { +fn testTaggedUnionInit(x: anytype) bool { const y = TaggedUnionWithAVoid{ .A = x }; return @as(@TagType(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A; } diff --git a/test/stage1/behavior/var_args.zig b/test/stage1/behavior/var_args.zig index 0c8674a7fb..eae8f8f888 100644 --- a/test/stage1/behavior/var_args.zig +++ b/test/stage1/behavior/var_args.zig @@ -1,6 +1,6 @@ const expect = @import("std").testing.expect; -fn add(args: var) i32 { +fn add(args: anytype) i32 { var sum = @as(i32, 0); { comptime var i: usize = 0; @@ -17,7 +17,7 @@ test "add arbitrary args" { expect(add(.{}) == 0); } -fn readFirstVarArg(args: var) void { +fn readFirstVarArg(args: anytype) void { const value = args[0]; } @@ -31,7 +31,7 @@ test "pass args directly" { expect(addSomeStuff(.{}) == 0); } -fn addSomeStuff(args: var) i32 { +fn addSomeStuff(args: anytype) i32 { return add(args); } @@ -47,7 +47,7 @@ test "runtime parameter before var args" { } } -fn extraFn(extra: u32, args: var) usize { +fn extraFn(extra: u32, args: anytype) usize { if (args.len >= 1) { expect(args[0] == false); } @@ -57,15 +57,15 @@ fn extraFn(extra: u32, args: var) usize { return args.len; } -const foos = [_]fn (var) bool{ +const foos = [_]fn (anytype) bool{ foo1, foo2, }; -fn foo1(args: var) bool { +fn foo1(args: anytype) bool { return true; } -fn foo2(args: var) bool { +fn foo2(args: anytype) bool { return false; } @@ -78,6 +78,6 @@ test "pass zero length array to var args param" { doNothingWithFirstArg(.{""}); } -fn doNothingWithFirstArg(args: var) void { +fn doNothingWithFirstArg(args: anytype) void { const a = args[0]; } diff --git a/test/stage1/behavior/vector.zig b/test/stage1/behavior/vector.zig index 851074e0d1..8bdffcd500 100644 --- a/test/stage1/behavior/vector.zig +++ b/test/stage1/behavior/vector.zig @@ -171,7 +171,7 @@ test "load vector elements via comptime index" { expect(v[1] == 2); expect(loadv(&v[2]) == 3); } - fn loadv(ptr: var) i32 { + fn loadv(ptr: anytype) i32 { return ptr.*; } }; @@ -194,7 +194,7 @@ test "store vector elements via comptime index" { storev(&v[0], 100); expect(v[0] == 100); } - fn storev(ptr: var, x: i32) void { + fn storev(ptr: anytype, x: i32) void { ptr.* = x; } }; @@ -392,7 +392,7 @@ test "vector shift operators" { if (builtin.os.tag == .wasi) return error.SkipZigTest; const S = struct { 
- fn doTheTestShift(x: var, y: var) void { + fn doTheTestShift(x: anytype, y: anytype) void { const N = @typeInfo(@TypeOf(x)).Array.len; const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = @typeInfo(@TypeOf(y)).Array.child; @@ -409,7 +409,7 @@ test "vector shift operators" { expectEqual(x[i] << y[i], v); } } - fn doTheTestShiftExact(x: var, y: var, dir: enum { Left, Right }) void { + fn doTheTestShiftExact(x: anytype, y: anytype, dir: enum { Left, Right }) void { const N = @typeInfo(@TypeOf(x)).Array.len; const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = @typeInfo(@TypeOf(y)).Array.child; -- cgit v1.2.3
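
Note on the patch series above: it applies one mechanical rename — the keyword `var` in a generic (inferred-type) parameter position becomes `anytype` — and the type-inference semantics are unchanged. As a minimal illustrative sketch of the before/after (modeled on the `max_var` function patched in test/stage1/behavior/generics.zig above; this sketch is not itself part of the patch series):

    const std = @import("std");
    const expect = std.testing.expect;

    // Before this change, an inferred-type parameter was spelled `var`:
    //     fn max(a: var, b: var) @TypeOf(a + b) { ... }
    // After it, the same generic parameter is spelled `anytype`:
    fn max(a: anytype, b: anytype) @TypeOf(a + b) {
        return if (a > b) a else b;
    }

    test "anytype parameters infer their argument types" {
        expect(max(@as(i32, 1), @as(i32, 2)) == 2);
        expect(max(@as(f64, 0.5), @as(f64, 1.5)) == 1.5);
    }

Because the rename is purely syntactic, every hunk edits only the `var` occurrence itself — parameter lists, struct fields such as `x: var`, and function types such as `[_]fn (var) bool` — while the surrounding bodies are left untouched (apart from the separate zig-fmt whitespace fixes bundled into the same commit).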