From ce30357532395f46229052b5bcdc5f0a49d2b20d Mon Sep 17 00:00:00 2001 From: daurnimator Date: Mon, 15 Jun 2020 22:58:59 +1000 Subject: std: clean up debug stderr variables - stderr_file_writer was unused - stderr_stream was a pointer to a stream, rather than a stream - other functions assumed that getStderrStream has already been called --- lib/std/debug.zig | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/lib/std/debug.zig b/lib/std/debug.zig index f339aa639b..916fb7ff2d 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -50,14 +50,10 @@ pub const LineInfo = struct { } }; -/// Tries to write to stderr, unbuffered, and ignores any error returned. -/// Does not append a newline. -var stderr_file: File = undefined; -var stderr_file_writer: File.Writer = undefined; - -var stderr_stream: ?*File.OutStream = null; var stderr_mutex = std.Mutex.init(); +/// Tries to write to stderr, unbuffered, and ignores any error returned. +/// Does not append a newline. pub fn warn(comptime fmt: []const u8, args: var) void { const held = stderr_mutex.acquire(); defer held.release(); @@ -65,16 +61,8 @@ pub fn warn(comptime fmt: []const u8, args: var) void { nosuspend stderr.print(fmt, args) catch return; } -pub fn getStderrStream() *File.OutStream { - if (stderr_stream) |st| { - return st; - } else { - stderr_file = io.getStdErr(); - stderr_file_writer = stderr_file.outStream(); - const st = &stderr_file_writer; - stderr_stream = st; - return st; - } +pub fn getStderrStream() File.OutStream { + return io.getStdErr().outStream(); } pub fn getStderrMutex() *std.Mutex { @@ -99,6 +87,7 @@ pub fn detectTTYConfig() TTY.Config { if (process.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| { return .escape_codes; } else |_| { + const stderr_file = io.getStdErr(); if (stderr_file.supportsAnsiEscapeCodes()) { return .escape_codes; } else if (builtin.os.tag == .windows and stderr_file.isTty()) { @@ -458,6 +447,7 @@ pub const TTY = struct { .Reset => out_stream.writeAll(RESET) catch return, }, .windows_api => if (builtin.os.tag == .windows) { + const stderr_file = io.getStdErr(); const S = struct { var attrs: windows.WORD = undefined; var init_attrs = false; -- cgit v1.2.3 From af592f0ddd4448e746cf288b674c0199325598d5 Mon Sep 17 00:00:00 2001 From: daurnimator Date: Mon, 15 Jun 2020 23:51:25 +1000 Subject: std: remove std.debug.getStderrStream Rather than migrate to new 'writer' interface, just remove it --- lib/std/debug.zig | 16 ++++++---------- lib/std/json.zig | 2 +- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 916fb7ff2d..591f2d1a80 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -57,14 +57,10 @@ var stderr_mutex = std.Mutex.init(); pub fn warn(comptime fmt: []const u8, args: var) void { const held = stderr_mutex.acquire(); defer held.release(); - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); nosuspend stderr.print(fmt, args) catch return; } -pub fn getStderrStream() File.OutStream { - return io.getStdErr().outStream(); -} - pub fn getStderrMutex() *std.Mutex { return &stderr_mutex; } @@ -102,7 +98,7 @@ pub fn detectTTYConfig() TTY.Config { /// TODO multithreaded awareness pub fn dumpCurrentStackTrace(start_addr: ?usize) void { nosuspend { - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info 
stripped\n", .{}) catch return; return; @@ -123,7 +119,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void { /// TODO multithreaded awareness pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void { nosuspend { - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return; return; @@ -193,7 +189,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace /// TODO multithreaded awareness pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void { nosuspend { - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return; return; @@ -261,7 +257,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c const held = panic_mutex.acquire(); defer held.release(); - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); stderr.print(format ++ "\n", args) catch os.abort(); if (trace) |t| { dumpStackTrace(t.*); @@ -286,7 +282,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c // A panic happened while trying to print a previous panic message, // we're still holding the mutex but that's fine as we're going to // call abort() - const stderr = getStderrStream(); + const stderr = io.getStdErr().writer(); stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort(); }, else => { diff --git a/lib/std/json.zig b/lib/std/json.zig index eeceeac8a7..4acdbc7d1a 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -1288,7 +1288,7 @@ pub const Value = union(enum) { var held = std.debug.getStderrMutex().acquire(); defer held.release(); - const stderr = std.debug.getStderrStream(); + const stderr = io.getStdErr().writer(); std.json.stringify(self, std.json.StringifyOptions{ .whitespace = null }, stderr) catch return; } }; -- cgit v1.2.3 From 8e5393a779ee115846821e28bdb47affdf158992 Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Tue, 2 Jun 2020 18:43:27 +0200 Subject: Deprecate std.debug.warn --- lib/std/debug.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 591f2d1a80..f3c2cf3b31 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -52,8 +52,7 @@ pub const LineInfo = struct { var stderr_mutex = std.Mutex.init(); -/// Tries to write to stderr, unbuffered, and ignores any error returned. -/// Does not append a newline. +/// Deprecated. Use `std.log` functions for logging. pub fn warn(comptime fmt: []const u8, args: var) void { const held = stderr_mutex.acquire(); defer held.release(); -- cgit v1.2.3 From c3e0224792510e69763dbc6ba68794f9134925f2 Mon Sep 17 00:00:00 2001 From: Isaac Freund Date: Tue, 2 Jun 2020 19:22:22 +0200 Subject: Add std.debug.print for "printf debugging" --- lib/std/debug.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/lib/std/debug.zig b/lib/std/debug.zig index f3c2cf3b31..92b79be35c 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -52,8 +52,13 @@ pub const LineInfo = struct { var stderr_mutex = std.Mutex.init(); -/// Deprecated. Use `std.log` functions for logging. -pub fn warn(comptime fmt: []const u8, args: var) void { +/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for +/// "printf debugging". 
+pub const warn = print; + +/// Print to stderr, unbuffered, and silently returning on failure. Intended +/// for use in "printf debugging." Use `std.log` functions for proper logging. +pub fn print(comptime fmt: []const u8, args: var) void { const held = stderr_mutex.acquire(); defer held.release(); const stderr = io.getStdErr().writer(); -- cgit v1.2.3 From b3b6ccba50ef7a683ad05546cba2b71e7d10489f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 3 Jul 2020 23:57:24 +0000 Subject: reimplement std.HashMap * breaking changes to the API. Some of the weird decisions from before are changed to what would be more expected. - `get` returns `?V`, use `getEntry` for the old API. - `put` returns `!void`, use `fetchPut` for the old API. * HashMap now has a comptime parameter of whether to store hashes with entries. AutoHashMap has heuristics on whether to set this parameter. For example, for integers, it is false, since equality checking is cheap, but for strings, it is true, since equality checking is probably expensive. * The implementation has a separate array for entry_index / distance_from_start_index. Entries no longer has holes; it is an ArrayList, and iteration is simpler and more cache coherent. This is inspired by Python's new dictionaries. * HashMap is separated into an "unmanaged" and a "managed" API. The unmanaged API is where the actual implementation is; the managed API wraps it and provides a more convenient API, storing the allocator. * Memory usage: When there are less than or equal to 8 entries, HashMap now incurs only a single pointer-size integer as overhead, opposed to using an ArrayList. * Since the entries array is separate from the indexes array, the holes in the indexes array take up less room than the holes in the entries array otherwise would. However the entries array also allocates additional capacity for appending into the array. * HashMap now maintains insertion order. Deletion performs a "swap remove". It's now possible to modify the HashMap while iterating. --- lib/std/array_list.zig | 16 + lib/std/debug.zig | 2 +- lib/std/hash_map.zig | 1058 ++++++++++++++++++++++++++++++++++-------------- 3 files changed, 766 insertions(+), 310 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index b57d051d2b..a68c1fa9d6 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -210,6 +210,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { self.capacity = new_len; } + /// Reduce length to `new_len`. + /// Invalidates element pointers. + /// Keeps capacity the same. + pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + self.items.len = new_len; + } + pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; @@ -432,6 +440,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ self.capacity = new_len; } + /// Reduce length to `new_len`. + /// Invalidates element pointers. + /// Keeps capacity the same. 
+ pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { + assert(new_len <= self.items.len); + self.items.len = new_len; + } + pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 92b79be35c..e9bafec94c 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1278,7 +1278,7 @@ pub const DebugInfo = struct { else => return error.MissingDebugInfo, } - if (self.address_map.getValue(ctx.base_address)) |obj_di| { + if (self.address_map.get(ctx.base_address)) |obj_di| { return obj_di; } diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index bcd4280153..4b91a83ba2 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -9,17 +9,15 @@ const autoHash = std.hash.autoHash; const Wyhash = std.hash.Wyhash; const Allocator = mem.Allocator; const builtin = @import("builtin"); - -const want_modification_safety = std.debug.runtime_safety; -const debug_u32 = if (want_modification_safety) u32 else void; +const hash_map = @This(); pub fn AutoHashMap(comptime K: type, comptime V: type) type { - return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K)); + return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), autoEqlIsCheap(K)); } /// Builtin hashmap for strings as keys. pub fn StringHashMap(comptime V: type) type { - return HashMap([]const u8, V, hashString, eqlString); + return HashMap([]const u8, V, hashString, eqlString, true); } pub fn eqlString(a: []const u8, b: []const u8) bool { @@ -30,422 +28,846 @@ pub fn hashString(s: []const u8) u32 { return @truncate(u32, std.hash.Wyhash.hash(0, s)); } -pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type { +/// Insertion order is preserved. +/// Deletions perform a "swap removal" on the entries list. +/// Modifying the hash map while iterating is allowed, however one must understand +/// the (well defined) behavior when mixing insertions and deletions with iteration. +/// For a hash map that can be initialized directly that does not store an Allocator +/// field, see `HashMapUnmanaged`. +/// When `store_hash` is `false`, this data structure is biased towards cheap `eql` +/// functions. It does not store each item's hash in the table. Setting `store_hash` +/// to `true` incurs slightly more memory cost by storing each key's hash in the table +/// but only has to call `eql` for hash collisions. +pub fn HashMap( + comptime K: type, + comptime V: type, + comptime hash: fn (key: K) u32, + comptime eql: fn (a: K, b: K) bool, + comptime store_hash: bool, +) type { return struct { - entries: []Entry, - size: usize, - max_distance_from_start_index: usize, + unmanaged: Unmanaged, allocator: *Allocator, - /// This is used to detect bugs where a hashtable is edited while an iterator is running. - modification_count: debug_u32, - - const Self = @This(); - - /// A *KV is a mutable pointer into this HashMap's internal storage. - /// Modifying the key is undefined behavior. - /// Modifying the value is harmless. - /// *KV pointers become invalid whenever this HashMap is modified, - /// and then any access to the *KV is undefined behavior. 
- pub const KV = struct { - key: K, - value: V, - }; - - const Entry = struct { - used: bool, - distance_from_start_index: usize, - kv: KV, - }; - - pub const GetOrPutResult = struct { - kv: *KV, - found_existing: bool, - }; + pub const Unmanaged = HashMapUnmanaged(K, V, hash, eql, store_hash); + pub const Entry = Unmanaged.Entry; + pub const Hash = Unmanaged.Hash; + pub const GetOrPutResult = Unmanaged.GetOrPutResult; + /// Deprecated. Iterate using `items`. pub const Iterator = struct { hm: *const Self, - // how many items have we returned - count: usize, - // iterator through the entry array + /// Iterator through the entry array. index: usize, - // used to detect concurrent modification - initial_modification_count: debug_u32, - pub fn next(it: *Iterator) ?*KV { - if (want_modification_safety) { - assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification - } - if (it.count >= it.hm.size) return null; - while (it.index < it.hm.entries.len) : (it.index += 1) { - const entry = &it.hm.entries[it.index]; - if (entry.used) { - it.index += 1; - it.count += 1; - return &entry.kv; - } - } - unreachable; // no next item + pub fn next(it: *Iterator) ?*Entry { + if (it.index >= it.hm.unmanaged.entries.items.len) return null; + const result = &it.hm.unmanaged.entries.items[it.index]; + it.index += 1; + return result; } - // Reset the iterator to the initial index + /// Reset the iterator to the initial index pub fn reset(it: *Iterator) void { - it.count = 0; it.index = 0; - // Resetting the modification count too - it.initial_modification_count = it.hm.modification_count; } }; + const Self = @This(); + const Index = Unmanaged.Index; + pub fn init(allocator: *Allocator) Self { - return Self{ - .entries = &[_]Entry{}, + return .{ + .unmanaged = .{}, .allocator = allocator, - .size = 0, - .max_distance_from_start_index = 0, - .modification_count = if (want_modification_safety) 0 else {}, }; } - pub fn deinit(hm: Self) void { - hm.allocator.free(hm.entries); + pub fn deinit(self: *Self) void { + self.unmanaged.deinit(self.allocator); + self.* = undefined; } - pub fn clear(hm: *Self) void { - for (hm.entries) |*entry| { - entry.used = false; - } - hm.size = 0; - hm.max_distance_from_start_index = 0; - hm.incrementModificationCount(); + pub fn clearRetainingCapacity(self: *Self) void { + return self.unmanaged.clearRetainingCapacity(); } + pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + return self.unmanaged.clearAndFree(self.allocator); + } + + /// Deprecated. Use `items().len`. pub fn count(self: Self) usize { - return self.size; + return self.items().len; + } + + /// Deprecated. Iterate using `items`. + pub fn iterator(self: *const Self) Iterator { + return Iterator{ + .hm = self, + .index = 0, + }; } /// If key exists this function cannot fail. /// If there is an existing item with `key`, then the result - /// kv pointer points to it, and found_existing is true. + /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and - /// the kv pointer points to it. Caller should then initialize - /// the data. + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). pub fn getOrPut(self: *Self, key: K) !GetOrPutResult { - // TODO this implementation can be improved - we should only - // have to hash once and find the entry once. 
- if (self.get(key)) |kv| { - return GetOrPutResult{ - .kv = kv, - .found_existing = true, - }; - } - self.incrementModificationCount(); - try self.autoCapacity(); - const put_result = self.internalPut(key); - assert(put_result.old_kv == null); - return GetOrPutResult{ - .kv = &put_result.new_entry.kv, - .found_existing = false, + return self.unmanaged.getOrPut(self.allocator, key); + } + + /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + /// If a new entry needs to be stored, this function asserts there + /// is enough capacity to store it. + pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { + return self.unmanaged.getOrPutAssumeCapacity(key); + } + + pub fn getOrPutValue(self: *Self, key: K, value: V) !*Entry { + return self.unmanaged.getOrPutValue(self.allocator, key, value); + } + + /// Increases capacity, guaranteeing that insertions up until the + /// `expected_count` will not cause an allocation, and therefore cannot fail. + pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { + return self.unmanaged.ensureCapacity(self.allocator, new_capacity); + } + + /// Returns the number of total elements which may be present before it is + /// no longer guaranteed that no allocations will be performed. + pub fn capacity(self: *Self) usize { + return self.unmanaged.capacity(); + } + + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPut`. + pub fn put(self: *Self, key: K, value: V) !void { + return self.unmanaged.put(self.allocator, key, value); + } + + /// Inserts a key-value pair into the hash map, asserting that no previous + /// entry with the same key is already present + pub fn putNoClobber(self: *Self, key: K, value: V) !void { + return self.unmanaged.putNoClobber(self.allocator, key, value); + } + + /// Asserts there is enough capacity to store the new key-value pair. + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { + return self.unmanaged.putAssumeCapacity(key, value); + } + + /// Asserts there is enough capacity to store the new key-value pair. + /// Asserts that it does not clobber any existing data. + /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { + return self.unmanaged.putAssumeCapacityNoClobber(key, value); + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + pub fn fetchPut(self: *Self, key: K, value: V) !?Entry { + return self.unmanaged.fetchPut(self.allocator, key, value); + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + /// If insertion happuns, asserts there is enough capacity without allocating. 
+ pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry { + return self.unmanaged.fetchPutAssumeCapacity(key, value); + } + + pub fn getEntry(self: Self, key: K) ?*Entry { + return self.unmanaged.getEntry(key); + } + + pub fn get(self: Self, key: K) ?V { + return self.unmanaged.get(key); + } + + pub fn contains(self: Self, key: K) bool { + return self.unmanaged.contains(key); + } + + /// If there is an `Entry` with a matching key, it is deleted from + /// the hash map, and then returned from this function. + pub fn remove(self: *Self, key: K) ?Entry { + return self.unmanaged.remove(key); + } + + /// Asserts there is an `Entry` with matching key, deletes it from the hash map, + /// and discards it. + pub fn removeAssertDiscard(self: *Self, key: K) void { + return self.unmanaged.removeAssertDiscard(key); + } + + pub fn items(self: Self) []Entry { + return self.unmanaged.items(); + } + + pub fn clone(self: Self) !Self { + var other = try self.unmanaged.clone(self.allocator); + return other.promote(self.allocator); + } + }; +} + +/// General purpose hash table. +/// Insertion order is preserved. +/// Deletions perform a "swap removal" on the entries list. +/// Modifying the hash map while iterating is allowed, however one must understand +/// the (well defined) behavior when mixing insertions and deletions with iteration. +/// This type does not store an Allocator field - the Allocator must be passed in +/// with each function call that requires it. See `HashMap` for a type that stores +/// an Allocator field for convenience. +/// Can be initialized directly using the default field values. +/// This type is designed to have low overhead for small numbers of entries. When +/// `store_hash` is `false` and the number of entries in the map is less than 9, +/// the overhead cost of using `HashMapUnmanaged` rather than `std.ArrayList` is +/// only a single pointer-sized integer. +/// When `store_hash` is `false`, this data structure is biased towards cheap `eql` +/// functions. It does not store each item's hash in the table. Setting `store_hash` +/// to `true` incurs slightly more memory cost by storing each key's hash in the table +/// but guarantees only one call to `eql` per insertion/deletion. +pub fn HashMapUnmanaged( + comptime K: type, + comptime V: type, + comptime hash: fn (key: K) u32, + comptime eql: fn (a: K, b: K) bool, + comptime store_hash: bool, +) type { + return struct { + /// It is permitted to access this field directly. + entries: std.ArrayListUnmanaged(Entry) = .{}, + + /// When entries length is less than `linear_scan_max`, this remains `null`. + /// Once entries length grows big enough, this field is allocated. There is + /// an IndexHeader followed by an array of Index(I) structs, where I is defined + /// by how many total indexes there are. + index_header: ?*IndexHeader = null, + + /// Modifying the key is illegal behavior. + /// Modifying the value is allowed. + /// Entry pointers become invalid whenever this HashMap is modified, + /// unless `ensureCapacity` was previously used. + pub const Entry = struct { + /// This field is `void` if `store_hash` is `false`. 
+ hash: Hash, + key: K, + value: V, + }; + + pub const Hash = if (store_hash) u32 else void; + + pub const GetOrPutResult = struct { + entry: *Entry, + found_existing: bool, + }; + + pub const Managed = HashMap(K, V, hash, eql, store_hash); + + const Self = @This(); + + const linear_scan_max = 8; + + pub fn promote(self: Self, allocator: *Allocator) Managed { + return .{ + .unmanaged = self, + .allocator = allocator, }; } - pub fn getOrPutValue(self: *Self, key: K, value: V) !*KV { - const res = try self.getOrPut(key); - if (!res.found_existing) - res.kv.value = value; + pub fn deinit(self: *Self, allocator: *Allocator) void { + self.entries.deinit(allocator); + if (self.index_header) |header| { + header.free(allocator); + } + self.* = undefined; + } - return res.kv; + pub fn clearRetainingCapacity(self: *Self) void { + self.entries.items.len = 0; + if (self.header) |header| { + header.max_distance_from_start_index = 0; + const indexes = header.indexes(u8); + @memset(indexes.ptr, 0xff, indexes.len); + } } - fn optimizedCapacity(expected_count: usize) usize { - // ensure that the hash map will be at most 60% full if - // expected_count items are put into it - var optimized_capacity = expected_count * 5 / 3; - // an overflow here would mean the amount of memory required would not - // be representable in the address space - return math.ceilPowerOfTwo(usize, optimized_capacity) catch unreachable; + pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + self.entries.shrink(allocator, 0); + if (self.header) |header| { + header.free(allocator); + self.header = null; + } } - /// Increases capacity so that the hash map will be at most - /// 60% full when expected_count items are put into it - pub fn ensureCapacity(self: *Self, expected_count: usize) !void { - if (expected_count == 0) return; - const optimized_capacity = optimizedCapacity(expected_count); - return self.ensureCapacityExact(optimized_capacity); + /// If key exists this function cannot fail. + /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { + self.ensureCapacity(allocator, self.entries.items.len + 1) catch |err| { + // "If key exists this function cannot fail." + return GetOrPutResult{ + .entry = self.getEntry(key) orelse return err, + .found_existing = true, + }; + }; + return self.getOrPutAssumeCapacity(key); } - /// Sets the capacity to the new capacity if the new - /// capacity is greater than the current capacity. - /// New capacity must be a power of two. - fn ensureCapacityExact(self: *Self, new_capacity: usize) !void { - // capacity must always be a power of two to allow for modulo - // optimization in the constrainIndex fn - assert(math.isPowerOfTwo(new_capacity)); + /// If there is an existing item with `key`, then the result + /// `Entry` pointer points to it, and found_existing is true. + /// Otherwise, puts a new item with undefined value, and + /// the `Entry` pointer points to it. Caller should then initialize + /// the value (but not the key). + /// If a new entry needs to be stored, this function asserts there + /// is enough capacity to store it. + pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { + const header = self.index_header orelse { + // Linear scan. 
+ const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |*item| { + if (item.hash == h and eql(key, item.key)) { + return GetOrPutResult{ + .entry = item, + .found_existing = true, + }; + } + } + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + return GetOrPutResult{ + .entry = new_entry, + .found_existing = false, + }; + }; - if (new_capacity <= self.entries.len) { - return; + switch (header.capacityIndexType()) { + .u8 => return self.getOrPutInternal(key, header, u8), + .u16 => return self.getOrPutInternal(key, header, u16), + .u32 => return self.getOrPutInternal(key, header, u32), + .usize => return self.getOrPutInternal(key, header, usize), } + } - const old_entries = self.entries; - try self.initCapacity(new_capacity); - self.incrementModificationCount(); - if (old_entries.len > 0) { - // dump all of the old elements into the new table - for (old_entries) |*old_entry| { - if (old_entry.used) { - self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value; + pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !*Entry { + const res = try self.getOrPut(allocator, key); + if (!res.found_existing) + res.entry.value = value; + + return res.entry; + } + + /// Increases capacity, guaranteeing that insertions up until the + /// `expected_count` will not cause an allocation, and therefore cannot fail. + pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { + try self.entries.ensureCapacity(allocator, new_capacity); + if (new_capacity <= linear_scan_max) return; + + // Resize if indexes would be more than 75% full. + const needed_len = new_capacity * 4 / 3; + if (self.index_header) |header| { + if (needed_len > header.indexes_len) { + var new_indexes_len = header.indexes_len; + while (true) { + new_indexes_len += new_indexes_len / 2 + 8; + if (new_indexes_len >= needed_len) break; } + const new_header = try IndexHeader.alloc(allocator, new_indexes_len); + self.insertAllEntriesIntoNewHeader(new_header); + header.free(allocator); + self.index_header = new_header; } - self.allocator.free(old_entries); + } else { + const header = try IndexHeader.alloc(allocator, needed_len); + self.insertAllEntriesIntoNewHeader(header); + self.index_header = header; } } - /// Returns the kv pair that was already there. - pub fn put(self: *Self, key: K, value: V) !?KV { - try self.autoCapacity(); - return putAssumeCapacity(self, key, value); + /// Returns the number of total elements which may be present before it is + /// no longer guaranteed that no allocations will be performed. + pub fn capacity(self: Self) usize { + const entry_cap = self.entries.capacity; + const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const indexes_cap = (header.indexes_len + 1) * 3 / 4; + return math.min(entry_cap, indexes_cap); } - /// Calls put() and asserts that no kv pair is clobbered. - pub fn putNoClobber(self: *Self, key: K, value: V) !void { - assert((try self.put(key, value)) == null); + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPut`. 
+ pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { + const result = try self.getOrPut(allocator, key); + result.entry.value = value; } - pub fn putAssumeCapacity(self: *Self, key: K, value: V) ?KV { - assert(self.count() < self.entries.len); - self.incrementModificationCount(); + /// Inserts a key-value pair into the hash map, asserting that no previous + /// entry with the same key is already present + pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { + const result = try self.getOrPut(allocator, key); + assert(!result.found_existing); + result.entry.value = value; + } - const put_result = self.internalPut(key); - put_result.new_entry.kv.value = value; - return put_result.old_kv; + /// Asserts there is enough capacity to store the new key-value pair. + /// Clobbers any existing data. To detect if a put would clobber + /// existing data, see `getOrPutAssumeCapacity`. + pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { + const result = self.getOrPutAssumeCapacity(key); + result.entry.value = value; } + /// Asserts there is enough capacity to store the new key-value pair. + /// Asserts that it does not clobber any existing data. + /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { - assert(self.putAssumeCapacity(key, value) == null); + const result = self.getOrPutAssumeCapacity(key); + assert(!result.found_existing); + result.entry.value = value; } - pub fn get(hm: *const Self, key: K) ?*KV { - if (hm.entries.len == 0) { + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?Entry { + const gop = try self.getOrPut(allocator, key); + var result: ?Entry = null; + if (gop.found_existing) { + result = gop.entry.*; + } + gop.entry.value = value; + return result; + } + + /// Inserts a new `Entry` into the hash map, returning the previous one, if any. + /// If insertion happuns, asserts there is enough capacity without allocating. + pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry { + const gop = self.getOrPutAssumeCapacity(key); + var result: ?Entry = null; + if (gop.found_existing) { + result = gop.entry.*; + } + gop.entry.value = value; + return result; + } + + pub fn getEntry(self: Self, key: K) ?*Entry { + const header = self.index_header orelse { + // Linear scan. + const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |*item| { + if (item.hash == h and eql(key, item.key)) { + return item; + } + } return null; + }; + + switch (header.capacityIndexType()) { + .u8 => return self.getInternal(key, header, u8), + .u16 => return self.getInternal(key, header, u16), + .u32 => return self.getInternal(key, header, u32), + .usize => return self.getInternal(key, header, usize), } - return hm.internalGet(key); } - pub fn getValue(hm: *const Self, key: K) ?V { - return if (hm.get(key)) |kv| kv.value else null; + pub fn get(self: Self, key: K) ?V { + return if (self.getEntry(key)) |entry| entry.value else null; } - pub fn contains(hm: *const Self, key: K) bool { - return hm.get(key) != null; + pub fn contains(self: Self, key: K) bool { + return self.getEntry(key) != null; } - /// Returns any kv pair that was removed. 
- pub fn remove(hm: *Self, key: K) ?KV { - if (hm.entries.len == 0) return null; - hm.incrementModificationCount(); - const start_index = hm.keyToIndex(key); - { - var roll_over: usize = 0; - while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) { - const index = hm.constrainIndex(start_index + roll_over); - var entry = &hm.entries[index]; - - if (!entry.used) return null; - - if (!eql(entry.kv.key, key)) continue; - - const removed_kv = entry.kv; - while (roll_over < hm.entries.len) : (roll_over += 1) { - const next_index = hm.constrainIndex(start_index + roll_over + 1); - const next_entry = &hm.entries[next_index]; - if (!next_entry.used or next_entry.distance_from_start_index == 0) { - entry.used = false; - hm.size -= 1; - return removed_kv; - } - entry.* = next_entry.*; - entry.distance_from_start_index -= 1; - entry = next_entry; + /// If there is an `Entry` with a matching key, it is deleted from + /// the hash map, and then returned from this function. + pub fn remove(self: *Self, key: K) ?Entry { + const header = self.index_header orelse { + // Linear scan. + const h = if (store_hash) hash(key) else {}; + for (self.entries.items) |item, i| { + if (item.hash == h and eql(key, item.key)) { + return self.entries.swapRemove(i); } - unreachable; // shifting everything in the table } + return null; + }; + switch (header.capacityIndexType()) { + .u8 => return self.removeInternal(key, header, u8), + .u16 => return self.removeInternal(key, header, u16), + .u32 => return self.removeInternal(key, header, u32), + .usize => return self.removeInternal(key, header, usize), } - return null; } - /// Calls remove(), asserts that a kv pair is removed, and discards it. - pub fn removeAssertDiscard(hm: *Self, key: K) void { - assert(hm.remove(key) != null); + /// Asserts there is an `Entry` with matching key, deletes it from the hash map, + /// and discards it. + pub fn removeAssertDiscard(self: *Self, key: K) void { + assert(self.remove(key) != null); } - pub fn iterator(hm: *const Self) Iterator { - return Iterator{ - .hm = hm, - .count = 0, - .index = 0, - .initial_modification_count = hm.modification_count, - }; + pub fn items(self: Self) []Entry { + return self.entries.items; } - pub fn clone(self: Self) !Self { - var other = Self.init(self.allocator); - try other.initCapacity(self.entries.len); - var it = self.iterator(); - while (it.next()) |entry| { - try other.putNoClobber(entry.key, entry.value); + pub fn clone(self: Self, allocator: *Allocator) !Self { + // TODO this can be made more efficient by directly allocating + // the memory slices and memcpying the elements. 
+ var other = Self.init(); + try other.initCapacity(allocator, self.entries.len); + for (self.entries.items) |entry| { + other.putAssumeCapacityNoClobber(entry.key, entry.value); } return other; } - fn autoCapacity(self: *Self) !void { - if (self.entries.len == 0) { - return self.ensureCapacityExact(16); - } - // if we get too full (60%), double the capacity - if (self.size * 5 >= self.entries.len * 3) { - return self.ensureCapacityExact(self.entries.len * 2); - } - } + fn removeInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) ?Entry { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + var index = &indexes[index_index]; + if (index.isEmpty()) + return null; + + const entry = &self.entries.items[index.entry_index]; + + const hash_match = if (store_hash) h == entry.hash else true; + if (!hash_match or !eql(key, entry.key)) + continue; - fn initCapacity(hm: *Self, capacity: usize) !void { - hm.entries = try hm.allocator.alloc(Entry, capacity); - hm.size = 0; - hm.max_distance_from_start_index = 0; - for (hm.entries) |*entry| { - entry.used = false; + const removed_entry = self.entries.swapRemove(index.entry_index); + if (self.entries.items.len > 0 and self.entries.items.len != index.entry_index) { + // Because of the swap remove, now we need to update the index that was + // pointing to the last entry and is now pointing to this removed item slot. + self.updateEntryIndex(header, self.entries.items.len, index.entry_index, I, indexes); + } + + // Now we have to shift over the following indexes. + roll_over += 1; + while (roll_over < header.indexes_len) : (roll_over += 1) { + const next_index_index = (start_index + roll_over) % header.indexes_len; + const next_index = &indexes[next_index_index]; + if (next_index.isEmpty() or next_index.distance_from_start_index == 0) { + index.setEmpty(); + return removed_entry; + } + index.* = next_index.*; + index.distance_from_start_index -= 1; + index = next_index; + } + unreachable; } + return null; } - fn incrementModificationCount(hm: *Self) void { - if (want_modification_safety) { - hm.modification_count +%= 1; + fn updateEntryIndex( + self: *Self, + header: *IndexHeader, + old_entry_index: usize, + new_entry_index: usize, + comptime I: type, + indexes: []Index(I), + ) void { + const h = if (store_hash) self.entries.items[new_entry_index].hash else hash(self.entries.items[new_entry_index].key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + const index = &indexes[index_index]; + if (index.entry_index == old_entry_index) { + index.entry_index = @intCast(I, new_entry_index); + return; + } } + unreachable; } - const InternalPutResult = struct { - new_entry: *Entry, - old_kv: ?KV, - }; - - /// Returns a pointer to the new entry. - /// Asserts that there is enough space for the new item. - fn internalPut(self: *Self, orig_key: K) InternalPutResult { - var key = orig_key; - var value: V = undefined; - const start_index = self.keyToIndex(key); + /// Must ensureCapacity before calling this. 
+ fn getOrPutInternal(self: *Self, key: K, header: *IndexHeader, comptime I: type) GetOrPutResult { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); var roll_over: usize = 0; var distance_from_start_index: usize = 0; - var got_result_entry = false; - var result = InternalPutResult{ - .new_entry = undefined, - .old_kv = null, - }; - while (roll_over < self.entries.len) : ({ + while (roll_over <= header.indexes_len) : ({ roll_over += 1; distance_from_start_index += 1; }) { - const index = self.constrainIndex(start_index + roll_over); - const entry = &self.entries[index]; - - if (entry.used and !eql(entry.kv.key, key)) { - if (entry.distance_from_start_index < distance_from_start_index) { - // robin hood to the rescue - const tmp = entry.*; - self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index); - if (!got_result_entry) { - got_result_entry = true; - result.new_entry = entry; + const index_index = (start_index + roll_over) % header.indexes_len; + const index = indexes[index_index]; + if (index.isEmpty()) { + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, self.entries.items.len), + }; + header.maybeBumpMax(distance_from_start_index); + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + return .{ + .found_existing = false, + .entry = new_entry, + }; + } + + // This pointer survives the following append because we call + // entries.ensureCapacity before getOrPutInternal. + const entry = &self.entries.items[index.entry_index]; + const hash_match = if (store_hash) h == entry.hash else true; + if (hash_match and eql(key, entry.key)) { + return .{ + .found_existing = true, + .entry = entry, + }; + } + if (index.distance_from_start_index < distance_from_start_index) { + // In this case, we did not find the item. We will put a new entry. + // However, we will use this index for the new entry, and move + // the previous index down the line, to keep the max_distance_from_start_index + // as small as possible. + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, self.entries.items.len), + }; + header.maybeBumpMax(distance_from_start_index); + const new_entry = self.entries.addOneAssumeCapacity(); + new_entry.* = .{ + .hash = if (store_hash) h else {}, + .key = key, + .value = undefined, + }; + + distance_from_start_index = index.distance_from_start_index; + var prev_entry_index = index.entry_index; + + // Find somewhere to put the index we replaced by shifting + // following indexes backwards. 
+ roll_over += 1; + distance_from_start_index += 1; + while (roll_over < header.indexes_len) : ({ + roll_over += 1; + distance_from_start_index += 1; + }) { + const next_index_index = (start_index + roll_over) % header.indexes_len; + const next_index = indexes[next_index_index]; + if (next_index.isEmpty()) { + header.maybeBumpMax(distance_from_start_index); + indexes[next_index_index] = .{ + .entry_index = prev_entry_index, + .distance_from_start_index = @intCast(I, distance_from_start_index), + }; + return .{ + .found_existing = false, + .entry = new_entry, + }; + } + if (next_index.distance_from_start_index < distance_from_start_index) { + header.maybeBumpMax(distance_from_start_index); + indexes[next_index_index] = .{ + .entry_index = prev_entry_index, + .distance_from_start_index = @intCast(I, distance_from_start_index), + }; + distance_from_start_index = next_index.distance_from_start_index; + prev_entry_index = next_index.entry_index; } - entry.* = Entry{ - .used = true, - .distance_from_start_index = distance_from_start_index, - .kv = KV{ - .key = key, - .value = value, - }, - }; - key = tmp.kv.key; - value = tmp.kv.value; - distance_from_start_index = tmp.distance_from_start_index; } - continue; + unreachable; } + } + unreachable; + } - if (entry.used) { - result.old_kv = entry.kv; - } else { - // adding an entry. otherwise overwriting old value with - // same key - self.size += 1; - } + fn getInternal(self: Self, key: K, header: *IndexHeader, comptime I: type) ?*Entry { + const indexes = header.indexes(I); + const h = hash(key); + const start_index = header.hashToIndex(h); + var roll_over: usize = 0; + while (roll_over <= header.max_distance_from_start_index) : (roll_over += 1) { + const index_index = (start_index + roll_over) % header.indexes_len; + const index = indexes[index_index]; + if (index.isEmpty()) + return null; + + const entry = &self.entries.items[index.entry_index]; + const hash_match = if (store_hash) h == entry.hash else true; + if (hash_match and eql(key, entry.key)) + return entry; + } + return null; + } - self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index); - if (!got_result_entry) { - result.new_entry = entry; - } - entry.* = Entry{ - .used = true, - .distance_from_start_index = distance_from_start_index, - .kv = KV{ - .key = key, - .value = value, - }, - }; - return result; + fn insertAllEntriesIntoNewHeader(self: *Self, header: *IndexHeader) void { + switch (header.capacityIndexType()) { + .u8 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u8), + .u16 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u16), + .u32 => return self.insertAllEntriesIntoNewHeaderGeneric(header, u32), + .usize => return self.insertAllEntriesIntoNewHeaderGeneric(header, usize), } - unreachable; // put into a full map } - fn internalGet(hm: Self, key: K) ?*KV { - const start_index = hm.keyToIndex(key); - { + fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, header: *IndexHeader, comptime I: type) void { + const indexes = header.indexes(I); + entry_loop: for (self.entries.items) |entry, i| { + const h = if (store_hash) entry.hash else hash(entry.key); + const start_index = header.hashToIndex(h); + var entry_index = i; var roll_over: usize = 0; - while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) { - const index = hm.constrainIndex(start_index + roll_over); - const entry = &hm.entries[index]; - - if (!entry.used) return null; - if (eql(entry.kv.key, key)) return &entry.kv; + var 
distance_from_start_index: usize = 0; + while (roll_over < header.indexes_len) : ({ + roll_over += 1; + distance_from_start_index += 1; + }) { + const index_index = (start_index + roll_over) % header.indexes_len; + const next_index = indexes[index_index]; + if (next_index.isEmpty()) { + header.maybeBumpMax(distance_from_start_index); + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, entry_index), + }; + continue :entry_loop; + } + if (next_index.distance_from_start_index < distance_from_start_index) { + header.maybeBumpMax(distance_from_start_index); + indexes[index_index] = .{ + .distance_from_start_index = @intCast(I, distance_from_start_index), + .entry_index = @intCast(I, entry_index), + }; + distance_from_start_index = next_index.distance_from_start_index; + entry_index = next_index.entry_index; + } } + unreachable; } - return null; } + }; +} + +const CapacityIndexType = enum { u8, u16, u32, usize }; - fn keyToIndex(hm: Self, key: K) usize { - return hm.constrainIndex(@as(usize, hash(key))); +fn capacityIndexType(indexes_len: usize) CapacityIndexType { + if (indexes_len < math.maxInt(u8)) + return .u8; + if (indexes_len < math.maxInt(u16)) + return .u16; + if (indexes_len < math.maxInt(u32)) + return .u32; + return .usize; +} + +fn capacityIndexSize(indexes_len: usize) usize { + switch (capacityIndexType(indexes_len)) { + .u8 => return @sizeOf(Index(u8)), + .u16 => return @sizeOf(Index(u16)), + .u32 => return @sizeOf(Index(u32)), + .usize => return @sizeOf(Index(usize)), + } +} + +fn Index(comptime I: type) type { + return extern struct { + entry_index: I, + distance_from_start_index: I, + + const Self = @This(); + + fn isEmpty(idx: Self) bool { + return idx.entry_index == math.maxInt(I); } - fn constrainIndex(hm: Self, i: usize) usize { - // this is an optimization for modulo of power of two integers; - // it requires hm.entries.len to always be a power of two - return i & (hm.entries.len - 1); + fn setEmpty(idx: *Self) void { + idx.entry_index = math.maxInt(I); } }; } +/// This struct is trailed by an array of `Index(I)`, where `I` +/// and the array length are determined by `indexes_len`. 
+const IndexHeader = struct { + max_distance_from_start_index: usize, + indexes_len: usize, + + fn hashToIndex(header: IndexHeader, h: u32) usize { + return @as(usize, h) % header.indexes_len; + } + + fn indexes(header: *IndexHeader, comptime I: type) []Index(I) { + const start = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader)); + return start[0..header.indexes_len]; + } + + fn capacityIndexType(header: IndexHeader) CapacityIndexType { + return hash_map.capacityIndexType(header.indexes_len); + } + + fn maybeBumpMax(header: *IndexHeader, distance_from_start_index: usize) void { + if (distance_from_start_index > header.max_distance_from_start_index) { + header.max_distance_from_start_index = distance_from_start_index; + } + } + + fn alloc(allocator: *Allocator, len: usize) !*IndexHeader { + const index_size = hash_map.capacityIndexSize(len); + const nbytes = @sizeOf(IndexHeader) + index_size * len; + const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact); + @memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader)); + const result = @ptrCast(*IndexHeader, bytes.ptr); + result.* = .{ + .max_distance_from_start_index = 0, + .indexes_len = len, + }; + return result; + } + + fn free(header: *IndexHeader, allocator: *Allocator) void { + const index_size = hash_map.capacityIndexSize(header.indexes_len); + const ptr = @ptrCast([*]u8, header); + const slice = ptr[0 .. @sizeOf(IndexHeader) + header.indexes_len * index_size]; + allocator.free(slice); + } +}; + test "basic hash map usage" { var map = AutoHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); - testing.expect((try map.put(1, 11)) == null); - testing.expect((try map.put(2, 22)) == null); - testing.expect((try map.put(3, 33)) == null); - testing.expect((try map.put(4, 44)) == null); + testing.expect((try map.fetchPut(1, 11)) == null); + testing.expect((try map.fetchPut(2, 22)) == null); + testing.expect((try map.fetchPut(3, 33)) == null); + testing.expect((try map.fetchPut(4, 44)) == null); try map.putNoClobber(5, 55); - testing.expect((try map.put(5, 66)).?.value == 55); - testing.expect((try map.put(5, 55)).?.value == 66); + testing.expect((try map.fetchPut(5, 66)).?.value == 55); + testing.expect((try map.fetchPut(5, 55)).?.value == 66); const gop1 = try map.getOrPut(5); testing.expect(gop1.found_existing == true); - testing.expect(gop1.kv.value == 55); - gop1.kv.value = 77; - testing.expect(map.get(5).?.value == 77); + testing.expect(gop1.entry.value == 55); + gop1.entry.value = 77; + testing.expect(map.getEntry(5).?.value == 77); const gop2 = try map.getOrPut(99); testing.expect(gop2.found_existing == false); - gop2.kv.value = 42; - testing.expect(map.get(99).?.value == 42); + gop2.entry.value = 42; + testing.expect(map.getEntry(99).?.value == 42); const gop3 = try map.getOrPutValue(5, 5); testing.expect(gop3.value == 77); @@ -454,15 +876,15 @@ test "basic hash map usage" { testing.expect(gop4.value == 41); testing.expect(map.contains(2)); - testing.expect(map.get(2).?.value == 22); - testing.expect(map.getValue(2).? == 22); + testing.expect(map.getEntry(2).?.value == 22); + testing.expect(map.get(2).? 
== 22); const rmv1 = map.remove(2); testing.expect(rmv1.?.key == 2); testing.expect(rmv1.?.value == 22); testing.expect(map.remove(2) == null); + testing.expect(map.getEntry(2) == null); testing.expect(map.get(2) == null); - testing.expect(map.getValue(2) == null); map.removeAssertDiscard(3); } @@ -498,8 +920,8 @@ test "iterator hash map" { it.reset(); var count: usize = 0; - while (it.next()) |kv| : (count += 1) { - buffer[@intCast(usize, kv.key)] = kv.value; + while (it.next()) |entry| : (count += 1) { + buffer[@intCast(usize, entry.key)] = entry.value; } testing.expect(count == 3); testing.expect(it.next() == null); @@ -510,8 +932,8 @@ test "iterator hash map" { it.reset(); count = 0; - while (it.next()) |kv| { - buffer[@intCast(usize, kv.key)] = kv.value; + while (it.next()) |entry| { + buffer[@intCast(usize, entry.key)] = entry.value; count += 1; if (count >= 2) break; } @@ -531,14 +953,14 @@ test "ensure capacity" { defer map.deinit(); try map.ensureCapacity(20); - const initialCapacity = map.entries.len; - testing.expect(initialCapacity >= 20); + const initial_capacity = map.capacity(); + testing.expect(initial_capacity >= 20); var i: i32 = 0; while (i < 20) : (i += 1) { - testing.expect(map.putAssumeCapacity(i, i + 10) == null); + testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null); } // shouldn't resize from putAssumeCapacity - testing.expect(initialCapacity == map.entries.len); + testing.expect(initial_capacity == map.capacity()); } pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) { @@ -575,6 +997,24 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) { }.eql; } +pub fn autoEqlIsCheap(comptime K: type) bool { + return switch (@typeInfo(K)) { + .Bool, + .Int, + .Float, + .Pointer, + .ComptimeFloat, + .ComptimeInt, + .Enum, + .Fn, + .ErrorSet, + .AnyFrame, + .EnumLiteral, + => true, + else => false, + }; +} + pub fn getAutoHashStratFn(comptime K: type, comptime strategy: std.hash.Strategy) (fn (K) u32) { return struct { fn hash(key: K) u32 { -- cgit v1.2.3 From 3a89f214aa672c5844def1704845ad38ea60bdcd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 4 Jul 2020 22:25:49 +0000 Subject: update more HashMap API usage --- doc/docgen.zig | 2 +- doc/langref.html.in | 14 +--- lib/std/debug.zig | 6 +- lib/std/hash_map.zig | 2 +- src-self-hosted/Module.zig | 197 +++++++++++++++++++------------------------- src-self-hosted/codegen.zig | 4 +- src-self-hosted/link.zig | 6 +- src-self-hosted/zir.zig | 34 ++++---- 8 files changed, 112 insertions(+), 153 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/doc/docgen.zig b/doc/docgen.zig index 7886c7cc90..e2acfae768 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -392,7 +392,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc { .n = header_stack_size, }, }); - if (try urls.put(urlized, tag_token)) |entry| { + if (try urls.fetchPut(urlized, tag_token)) |entry| { parseError(tokenizer, tag_token, "duplicate header url: #{}", .{urlized}) catch {}; parseError(tokenizer, entry.value, "other tag here", .{}) catch {}; return error.ParseError; diff --git a/doc/langref.html.in b/doc/langref.html.in index dfbb93decf..d7b9de3c6e 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5363,11 +5363,11 @@ const std = @import("std"); const assert = std.debug.assert; test "turn HashMap into a set with void" { - var map = std.HashMap(i32, void, hash_i32, eql_i32).init(std.testing.allocator); + var map = std.AutoHashMap(i32, void).init(std.testing.allocator); defer map.deinit(); - _ = try map.put(1, 
{}); - _ = try map.put(2, {}); + try map.put(1, {}); + try map.put(2, {}); assert(map.contains(2)); assert(!map.contains(3)); @@ -5375,14 +5375,6 @@ test "turn HashMap into a set with void" { _ = map.remove(2); assert(!map.contains(2)); } - -fn hash_i32(x: i32) u32 { - return @bitCast(u32, x); -} - -fn eql_i32(a: i32, b: i32) bool { - return a == b; -} {#code_end#}

Note that this is different from using a dummy value for the hash map value. By using {#syntax#}void{#endsyntax#} as the type of the value, the hash map entry type has no value field, and diff --git a/lib/std/debug.zig b/lib/std/debug.zig index e9bafec94c..e6d0c17da4 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1132,7 +1132,7 @@ pub const DebugInfo = struct { const seg_end = seg_start + segment_cmd.vmsize; if (rebased_address >= seg_start and rebased_address < seg_end) { - if (self.address_map.getValue(base_address)) |obj_di| { + if (self.address_map.get(base_address)) |obj_di| { return obj_di; } @@ -1204,7 +1204,7 @@ pub const DebugInfo = struct { const seg_end = seg_start + info.SizeOfImage; if (address >= seg_start and address < seg_end) { - if (self.address_map.getValue(seg_start)) |obj_di| { + if (self.address_map.get(seg_start)) |obj_di| { return obj_di; } @@ -1441,7 +1441,7 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) { const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]); // Check if its debug infos are already in the cache - var o_file_di = self.ofiles.getValue(o_file_path) orelse + var o_file_di = self.ofiles.get(o_file_path) orelse (self.loadOFile(o_file_path) catch |err| switch (err) { error.FileNotFound, error.MissingDebugInfo, diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 0fe8ae34f1..aaec9a4d58 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -458,7 +458,7 @@ pub fn HashMapUnmanaged( } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. - /// If insertion happuns, asserts there is enough capacity without allocating. + /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry { const gop = self.getOrPutAssumeCapacity(key); var result: ?Entry = null; diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index 7a61cd5ccd..0c80803fc7 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -75,7 +75,7 @@ deletion_set: std.ArrayListUnmanaged(*Decl) = .{}, keep_source_files_loaded: bool, -const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql); +const DeclTable = std.HashMap(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false); const WorkItem = union(enum) { /// Write the machine code for a Decl to the output file. 
@@ -795,49 +795,38 @@ pub fn deinit(self: *Module) void { const allocator = self.allocator; self.deletion_set.deinit(allocator); self.work_queue.deinit(); - { - var it = self.decl_table.iterator(); - while (it.next()) |kv| { - kv.value.destroy(allocator); - } - self.decl_table.deinit(); + + for (self.decl_table.items()) |entry| { + entry.value.destroy(allocator); } - { - var it = self.failed_decls.iterator(); - while (it.next()) |kv| { - kv.value.destroy(allocator); - } - self.failed_decls.deinit(); + self.decl_table.deinit(); + + for (self.failed_decls.items()) |entry| { + entry.value.destroy(allocator); } - { - var it = self.failed_files.iterator(); - while (it.next()) |kv| { - kv.value.destroy(allocator); - } - self.failed_files.deinit(); + self.failed_decls.deinit(); + + for (self.failed_files.items()) |entry| { + entry.value.destroy(allocator); } - { - var it = self.failed_exports.iterator(); - while (it.next()) |kv| { - kv.value.destroy(allocator); - } - self.failed_exports.deinit(); + self.failed_files.deinit(); + + for (self.failed_exports.items()) |entry| { + entry.value.destroy(allocator); } - { - var it = self.decl_exports.iterator(); - while (it.next()) |kv| { - const export_list = kv.value; - allocator.free(export_list); - } - self.decl_exports.deinit(); + self.failed_exports.deinit(); + + for (self.decl_exports.items()) |entry| { + const export_list = entry.value; + allocator.free(export_list); } - { - var it = self.export_owners.iterator(); - while (it.next()) |kv| { - freeExportList(allocator, kv.value); - } - self.export_owners.deinit(); + self.decl_exports.deinit(); + + for (self.export_owners.items()) |entry| { + freeExportList(allocator, entry.value); } + self.export_owners.deinit(); + self.symbol_exports.deinit(); self.root_scope.destroy(allocator); self.* = undefined; @@ -918,9 +907,9 @@ pub fn makeBinFileWritable(self: *Module) !void { } pub fn totalErrorCount(self: *Module) usize { - const total = self.failed_decls.size + - self.failed_files.size + - self.failed_exports.size; + const total = self.failed_decls.items().len + + self.failed_files.items().len + + self.failed_exports.items().len; return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total; } @@ -931,32 +920,23 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors { var errors = std.ArrayList(AllErrors.Message).init(self.allocator); defer errors.deinit(); - { - var it = self.failed_files.iterator(); - while (it.next()) |kv| { - const scope = kv.key; - const err_msg = kv.value; - const source = try scope.getSource(self); - try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*); - } + for (self.failed_files.items()) |entry| { + const scope = entry.key; + const err_msg = entry.value; + const source = try scope.getSource(self); + try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*); } - { - var it = self.failed_decls.iterator(); - while (it.next()) |kv| { - const decl = kv.key; - const err_msg = kv.value; - const source = try decl.scope.getSource(self); - try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); - } + for (self.failed_decls.items()) |entry| { + const decl = entry.key; + const err_msg = entry.value; + const source = try decl.scope.getSource(self); + try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); } - { - var it = self.failed_exports.iterator(); - while (it.next()) |kv| { - const decl = kv.key.owner_decl; - const err_msg = kv.value; - const source = try 
decl.scope.getSource(self); - try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); - } + for (self.failed_exports.items()) |entry| { + const decl = entry.key.owner_decl; + const err_msg = entry.value; + const source = try decl.scope.getSource(self); + try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*); } if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) { @@ -1016,7 +996,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void { decl.analysis = .dependency_failure; }, else => { - try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); + try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1); self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( self.allocator, decl.src(), @@ -1086,7 +1066,7 @@ fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return error.AnalysisFail, else => { - try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); + try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1); self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( self.allocator, decl.src(), @@ -1636,7 +1616,7 @@ fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module { switch (root_scope.status) { .never_loaded, .unloaded_success => { - try self.failed_files.ensureCapacity(self.failed_files.size + 1); + try self.failed_files.ensureCapacity(self.failed_files.items().len + 1); const source = try root_scope.getSource(self); @@ -1677,7 +1657,7 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree { switch (root_scope.status) { .never_loaded, .unloaded_success => { - try self.failed_files.ensureCapacity(self.failed_files.size + 1); + try self.failed_files.ensureCapacity(self.failed_files.items().len + 1); const source = try root_scope.getSource(self); @@ -1745,8 +1725,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { const name = tree.tokenSliceLoc(name_loc); const name_hash = root_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); - if (self.decl_table.get(name_hash)) |kv| { - const decl = kv.value; + if (self.decl_table.get(name_hash)) |decl| { // Update the AST Node index of the decl, even if its contents are unchanged, it may // have been re-ordered. decl.src_index = decl_i; @@ -1774,14 +1753,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void { // TODO also look for global variable declarations // TODO also look for comptime blocks and exported globals } - { - // Handle explicitly deleted decls from the source code. Not to be confused - // with when we delete decls because they are no longer referenced. - var it = deleted_decls.iterator(); - while (it.next()) |kv| { - //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name}); - try self.deleteDecl(kv.key); - } + // Handle explicitly deleted decls from the source code. Not to be confused + // with when we delete decls because they are no longer referenced. + for (deleted_decls.items()) |entry| { + //std.debug.warn("noticed '{}' deleted from source\n", .{entry.key.name}); + try self.deleteDecl(entry.key); } } @@ -1800,18 +1776,14 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void { // we know which ones have been deleted. 
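The call-site updates above all follow one pattern. A minimal sketch of the HashMap surface they migrate to, using only operations that appear elsewhere in this series (put, fetchPut, get/getEntry, items(), capacity()); the test is illustrative only, not part of the tree:

    const std = @import("std");
    const testing = std.testing;

    test "reworked HashMap API (illustrative)" {
        var map = std.AutoHashMap(i32, i32).init(testing.allocator);
        defer map.deinit();

        // put returns !void; fetchPut returns the previous Entry, if any.
        try map.put(1, 10);
        const prev = try map.fetchPut(1, 11);
        testing.expect(prev.?.value == 10);

        // get returns the value directly; getEntry returns the whole entry.
        testing.expect(map.get(1).? == 11);
        testing.expect(map.getEntry(1).?.value == 11);

        // Iteration and counting go through items() rather than an iterator
        // of kv pairs and the old `.size` field.
        for (map.items()) |entry| {
            testing.expect(entry.key == 1 and entry.value == 11);
        }
        testing.expect(map.items().len == 1);
        testing.expect(map.capacity() >= 1);
    }
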
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator); defer deleted_decls.deinit(); - try deleted_decls.ensureCapacity(self.decl_table.size); - { - var it = self.decl_table.iterator(); - while (it.next()) |kv| { - deleted_decls.putAssumeCapacityNoClobber(kv.value, {}); - } + try deleted_decls.ensureCapacity(self.decl_table.items().len); + for (self.decl_table.items()) |entry| { + deleted_decls.putAssumeCapacityNoClobber(entry.value, {}); } for (src_module.decls) |src_decl, decl_i| { const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name); - if (self.decl_table.get(name_hash)) |kv| { - const decl = kv.value; + if (self.decl_table.get(name_hash)) |decl| { deleted_decls.removeAssertDiscard(decl); //std.debug.warn("'{}' contents: '{}'\n", .{ src_decl.name, src_decl.contents }); if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) { @@ -1835,14 +1807,11 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void { for (exports_to_resolve.items) |export_decl| { _ = try self.resolveZirDecl(&root_scope.base, export_decl); } - { - // Handle explicitly deleted decls from the source code. Not to be confused - // with when we delete decls because they are no longer referenced. - var it = deleted_decls.iterator(); - while (it.next()) |kv| { - //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name}); - try self.deleteDecl(kv.key); - } + // Handle explicitly deleted decls from the source code. Not to be confused + // with when we delete decls because they are no longer referenced. + for (deleted_decls.items()) |entry| { + //std.debug.warn("noticed '{}' deleted from source\n", .{entry.key.name}); + try self.deleteDecl(entry.key); } } @@ -1888,7 +1857,7 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void { const kv = self.export_owners.remove(decl) orelse return; for (kv.value) |exp| { - if (self.decl_exports.get(exp.exported_decl)) |decl_exports_kv| { + if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| { // Remove exports with owner_decl matching the regenerating decl. const list = decl_exports_kv.value; var i: usize = 0; @@ -1983,7 +1952,7 @@ fn createNewDecl( name_hash: Scope.NameHash, contents_hash: std.zig.SrcHash, ) !*Decl { - try self.decl_table.ensureCapacity(self.decl_table.size + 1); + try self.decl_table.ensureCapacity(self.decl_table.items().len + 1); const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash); errdefer self.allocator.destroy(new_decl); new_decl.name = try mem.dupeZ(self.allocator, u8, decl_name); @@ -2043,7 +2012,7 @@ fn resolveZirDecl(self: *Module, scope: *Scope, src_decl: *zir.Decl) InnerError! 
fn resolveZirDeclHavingIndex(self: *Module, scope: *Scope, src_decl: *zir.Decl, src_index: usize) InnerError!*Decl { const name_hash = scope.namespace().fullyQualifiedNameHash(src_decl.name); - const decl = self.decl_table.getValue(name_hash).?; + const decl = self.decl_table.get(name_hash).?; decl.src_index = src_index; try self.ensureDeclAnalyzed(decl); return decl; @@ -2148,8 +2117,8 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}), } - try self.decl_exports.ensureCapacity(self.decl_exports.size + 1); - try self.export_owners.ensureCapacity(self.export_owners.size + 1); + try self.decl_exports.ensureCapacity(self.decl_exports.items().len + 1); + try self.export_owners.ensureCapacity(self.export_owners.items().len + 1); const new_export = try self.allocator.create(Export); errdefer self.allocator.destroy(new_export); @@ -2168,23 +2137,23 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const // Add to export_owners table. const eo_gop = self.export_owners.getOrPut(owner_decl) catch unreachable; if (!eo_gop.found_existing) { - eo_gop.kv.value = &[0]*Export{}; + eo_gop.entry.value = &[0]*Export{}; } - eo_gop.kv.value = try self.allocator.realloc(eo_gop.kv.value, eo_gop.kv.value.len + 1); - eo_gop.kv.value[eo_gop.kv.value.len - 1] = new_export; - errdefer eo_gop.kv.value = self.allocator.shrink(eo_gop.kv.value, eo_gop.kv.value.len - 1); + eo_gop.entry.value = try self.allocator.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1); + eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export; + errdefer eo_gop.entry.value = self.allocator.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1); // Add to exported_decl table. 
const de_gop = self.decl_exports.getOrPut(exported_decl) catch unreachable; if (!de_gop.found_existing) { - de_gop.kv.value = &[0]*Export{}; + de_gop.entry.value = &[0]*Export{}; } - de_gop.kv.value = try self.allocator.realloc(de_gop.kv.value, de_gop.kv.value.len + 1); - de_gop.kv.value[de_gop.kv.value.len - 1] = new_export; - errdefer de_gop.kv.value = self.allocator.shrink(de_gop.kv.value, de_gop.kv.value.len - 1); + de_gop.entry.value = try self.allocator.realloc(de_gop.entry.value, de_gop.entry.value.len + 1); + de_gop.entry.value[de_gop.entry.value.len - 1] = new_export; + errdefer de_gop.entry.value = self.allocator.shrink(de_gop.entry.value, de_gop.entry.value.len - 1); if (self.symbol_exports.get(symbol_name)) |_| { - try self.failed_exports.ensureCapacity(self.failed_exports.size + 1); + try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1); self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create( self.allocator, src, @@ -2197,10 +2166,10 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const } try self.symbol_exports.putNoClobber(symbol_name, new_export); - self.bin_file.updateDeclExports(self, exported_decl, de_gop.kv.value) catch |err| switch (err) { + self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - try self.failed_exports.ensureCapacity(self.failed_exports.size + 1); + try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1); self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create( self.allocator, src, @@ -2494,7 +2463,7 @@ fn getNextAnonNameIndex(self: *Module) usize { fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl { const namespace = scope.namespace(); const name_hash = namespace.fullyQualifiedNameHash(ident_name); - return self.decl_table.getValue(name_hash); + return self.decl_table.get(name_hash); } fn analyzeInstExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) InnerError!*Inst { @@ -3489,8 +3458,8 @@ fn failNode( fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError { { errdefer err_msg.destroy(self.allocator); - try self.failed_decls.ensureCapacity(self.failed_decls.size + 1); - try self.failed_files.ensureCapacity(self.failed_files.size + 1); + try self.failed_decls.ensureCapacity(self.failed_decls.items().len + 1); + try self.failed_files.ensureCapacity(self.failed_files.items().len + 1); } switch (scope.tag) { .decl => { diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 73758bda87..8885ed2825 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -705,7 +705,7 @@ const Function = struct { } fn resolveInst(self: *Function, inst: *ir.Inst) !MCValue { - if (self.inst_table.getValue(inst)) |mcv| { + if (self.inst_table.get(inst)) |mcv| { return mcv; } if (inst.cast(ir.Inst.Constant)) |const_inst| { @@ -713,7 +713,7 @@ const Function = struct { try self.inst_table.putNoClobber(inst, mcvalue); return mcvalue; } else { - return self.inst_table.getValue(inst).?; + return self.inst_table.get(inst).?; } } diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index c6acf21b84..c615ad35fd 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -1071,7 +1071,7 @@ pub const ElfFile = struct { try self.file.?.pwriteAll(code, file_offset); // Since we updated the vaddr and the size, each 
corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.getValue(decl) orelse &[0]*Module.Export{}; + const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; return self.updateDeclExports(module, decl, decl_exports); } @@ -1093,7 +1093,7 @@ pub const ElfFile = struct { for (exports) |exp| { if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.ensureCapacity(module.failed_exports.size + 1); + try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}), @@ -1111,7 +1111,7 @@ pub const ElfFile = struct { }, .Weak => elf.STB_WEAK, .LinkOnce => { - try module.failed_exports.ensureCapacity(module.failed_exports.size + 1); + try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1); module.failed_exports.putAssumeCapacityNoClobber( exp, try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}), diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig index 92dbc66e2b..7dceaaea1b 100644 --- a/src-self-hosted/zir.zig +++ b/src-self-hosted/zir.zig @@ -758,7 +758,7 @@ pub const Module = struct { } fn writeInstParamToStream(self: Module, stream: var, inst: *Inst, inst_table: *const InstPtrTable) !void { - if (inst_table.getValue(inst)) |info| { + if (inst_table.get(inst)) |info| { if (info.index) |i| { try stream.print("%{}", .{info.index}); } else { @@ -843,7 +843,7 @@ const Parser = struct { skipSpace(self); const decl = try parseInstruction(self, &body_context, ident); const ident_index = body_context.instructions.items.len; - if (try body_context.name_map.put(ident, decl.inst)) |_| { + if (try body_context.name_map.fetchPut(ident, decl.inst)) |_| { return self.fail("redefinition of identifier '{}'", .{ident}); } try body_context.instructions.append(decl.inst); @@ -929,7 +929,7 @@ const Parser = struct { skipSpace(self); const decl = try parseInstruction(self, null, ident); const ident_index = self.decls.items.len; - if (try self.global_name_map.put(ident, decl.inst)) |_| { + if (try self.global_name_map.fetchPut(ident, decl.inst)) |_| { return self.fail("redefinition of identifier '{}'", .{ident}); } try self.decls.append(self.allocator, decl); @@ -1153,7 +1153,7 @@ const Parser = struct { else => continue, }; const ident = self.source[name_start..self.i]; - const kv = map.get(ident) orelse { + return map.get(ident) orelse { const bad_name = self.source[name_start - 1 .. self.i]; const src = name_start - 1; if (local_ref) { @@ -1172,7 +1172,6 @@ const Parser = struct { return &declval.base; } }; - return kv.value; } fn generateName(self: *Parser) ![]u8 { @@ -1219,13 +1218,12 @@ const EmitZIR = struct { // by the hash table. 
var src_decls = std.ArrayList(*IrModule.Decl).init(self.allocator); defer src_decls.deinit(); - try src_decls.ensureCapacity(self.old_module.decl_table.size); - try self.decls.ensureCapacity(self.allocator, self.old_module.decl_table.size); - try self.names.ensureCapacity(self.old_module.decl_table.size); + try src_decls.ensureCapacity(self.old_module.decl_table.items().len); + try self.decls.ensureCapacity(self.allocator, self.old_module.decl_table.items().len); + try self.names.ensureCapacity(self.old_module.decl_table.items().len); - var decl_it = self.old_module.decl_table.iterator(); - while (decl_it.next()) |kv| { - const decl = kv.value; + for (self.old_module.decl_table.items()) |entry| { + const decl = entry.value; src_decls.appendAssumeCapacity(decl); self.names.putAssumeCapacityNoClobber(mem.spanZ(decl.name), {}); } @@ -1248,7 +1246,7 @@ const EmitZIR = struct { .codegen_failure, .dependency_failure, .codegen_failure_retryable, - => if (self.old_module.failed_decls.getValue(ir_decl)) |err_msg| { + => if (self.old_module.failed_decls.get(ir_decl)) |err_msg| { const fail_inst = try self.arena.allocator.create(Inst.CompileError); fail_inst.* = .{ .base = .{ @@ -1270,7 +1268,7 @@ const EmitZIR = struct { continue; }, } - if (self.old_module.export_owners.getValue(ir_decl)) |exports| { + if (self.old_module.export_owners.get(ir_decl)) |exports| { for (exports) |module_export| { const symbol_name = try self.emitStringLiteral(module_export.src, module_export.options.name); const export_inst = try self.arena.allocator.create(Inst.Export); @@ -1314,7 +1312,7 @@ const EmitZIR = struct { try new_body.inst_table.putNoClobber(inst, new_inst); return new_inst; } else { - return new_body.inst_table.getValue(inst).?; + return new_body.inst_table.get(inst).?; } } @@ -1424,7 +1422,7 @@ const EmitZIR = struct { try self.emitBody(body, &inst_table, &instructions); }, .sema_failure => { - const err_msg = self.old_module.failed_decls.getValue(module_fn.owner_decl).?; + const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?; const fail_inst = try self.arena.allocator.create(Inst.CompileError); fail_inst.* = .{ .base = .{ @@ -1841,7 +1839,7 @@ const EmitZIR = struct { self.next_auto_name += 1; const gop = try self.names.getOrPut(proposed_name); if (!gop.found_existing) { - gop.kv.value = {}; + gop.entry.value = {}; return proposed_name; } } @@ -1861,9 +1859,9 @@ const EmitZIR = struct { }, .kw_args = .{}, }; - gop.kv.value = try self.emitUnnamedDecl(&primitive_inst.base); + gop.entry.value = try self.emitUnnamedDecl(&primitive_inst.base); } - return gop.kv.value; + return gop.entry.value; } fn emitStringLiteral(self: *EmitZIR, src: usize, str: []const u8) !*Decl { -- cgit v1.2.3 From e85fe13e44b1e2957b9d90e19c171fdfa8cb5505 Mon Sep 17 00:00:00 2001 From: Vexu Date: Sat, 11 Jul 2020 14:09:04 +0300 Subject: run zig fmt on std lib and self hosted --- build.zig | 8 +- lib/std/array_list.zig | 2 +- lib/std/array_list_sentineled.zig | 4 +- lib/std/atomic/queue.zig | 4 +- lib/std/build.zig | 4 +- lib/std/build/emit_raw.zig | 2 +- lib/std/builtin.zig | 10 +- lib/std/c.zig | 2 +- lib/std/c/ast.zig | 14 +-- lib/std/cache_hash.zig | 2 +- lib/std/comptime_string_map.zig | 6 +- lib/std/crypto/benchmark.zig | 12 +-- lib/std/crypto/test.zig | 2 +- lib/std/debug.zig | 24 ++--- lib/std/debug/leb128.zig | 14 +-- lib/std/dwarf.zig | 18 ++-- lib/std/elf.zig | 4 +- lib/std/event/group.zig | 2 +- lib/std/fmt.zig | 70 ++++++------- lib/std/fs/wasi.zig | 2 +- lib/std/hash/auto_hash.zig | 16 +-- 
lib/std/hash/benchmark.zig | 4 +- lib/std/hash/cityhash.zig | 2 +- lib/std/hash/murmur.zig | 2 +- lib/std/heap.zig | 31 +++--- lib/std/heap/logging_allocator.zig | 4 +- lib/std/http/headers.zig | 2 +- lib/std/io/bit_reader.zig | 2 +- lib/std/io/bit_writer.zig | 4 +- lib/std/io/buffered_reader.zig | 2 +- lib/std/io/buffered_writer.zig | 2 +- lib/std/io/counting_writer.zig | 2 +- lib/std/io/fixed_buffer_stream.zig | 2 +- lib/std/io/multi_writer.zig | 2 +- lib/std/io/peek_stream.zig | 2 +- lib/std/io/serialization.zig | 14 +-- lib/std/io/writer.zig | 2 +- lib/std/json.zig | 16 +-- lib/std/json/write_stream.zig | 6 +- lib/std/log.zig | 18 ++-- lib/std/math.zig | 36 +++---- lib/std/math/acos.zig | 2 +- lib/std/math/acosh.zig | 2 +- lib/std/math/asin.zig | 2 +- lib/std/math/asinh.zig | 2 +- lib/std/math/atan.zig | 2 +- lib/std/math/atanh.zig | 2 +- lib/std/math/big/int.zig | 20 ++-- lib/std/math/big/rational.zig | 4 +- lib/std/math/cbrt.zig | 2 +- lib/std/math/ceil.zig | 2 +- lib/std/math/complex/abs.zig | 2 +- lib/std/math/complex/acos.zig | 2 +- lib/std/math/complex/acosh.zig | 2 +- lib/std/math/complex/arg.zig | 2 +- lib/std/math/complex/asin.zig | 2 +- lib/std/math/complex/asinh.zig | 2 +- lib/std/math/complex/atan.zig | 2 +- lib/std/math/complex/atanh.zig | 2 +- lib/std/math/complex/conj.zig | 2 +- lib/std/math/complex/cos.zig | 2 +- lib/std/math/complex/cosh.zig | 2 +- lib/std/math/complex/exp.zig | 2 +- lib/std/math/complex/ldexp.zig | 2 +- lib/std/math/complex/log.zig | 2 +- lib/std/math/complex/proj.zig | 2 +- lib/std/math/complex/sin.zig | 2 +- lib/std/math/complex/sinh.zig | 2 +- lib/std/math/complex/sqrt.zig | 2 +- lib/std/math/complex/tan.zig | 2 +- lib/std/math/complex/tanh.zig | 2 +- lib/std/math/cos.zig | 2 +- lib/std/math/cosh.zig | 2 +- lib/std/math/exp.zig | 2 +- lib/std/math/exp2.zig | 2 +- lib/std/math/expm1.zig | 2 +- lib/std/math/expo2.zig | 2 +- lib/std/math/fabs.zig | 2 +- lib/std/math/floor.zig | 2 +- lib/std/math/frexp.zig | 2 +- lib/std/math/ilogb.zig | 2 +- lib/std/math/isfinite.zig | 2 +- lib/std/math/isinf.zig | 6 +- lib/std/math/isnan.zig | 4 +- lib/std/math/isnormal.zig | 2 +- lib/std/math/ln.zig | 2 +- lib/std/math/log10.zig | 2 +- lib/std/math/log1p.zig | 2 +- lib/std/math/log2.zig | 2 +- lib/std/math/modf.zig | 2 +- lib/std/math/round.zig | 2 +- lib/std/math/scalbn.zig | 2 +- lib/std/math/signbit.zig | 2 +- lib/std/math/sin.zig | 2 +- lib/std/math/sinh.zig | 2 +- lib/std/math/sqrt.zig | 2 +- lib/std/math/tan.zig | 2 +- lib/std/math/tanh.zig | 2 +- lib/std/math/trunc.zig | 2 +- lib/std/mem.zig | 173 +++++++++++++++++---------------- lib/std/meta.zig | 10 +- lib/std/meta/trait.zig | 8 +- lib/std/net.zig | 6 +- lib/std/os.zig | 2 +- lib/std/os/uefi.zig | 2 +- lib/std/progress.zig | 4 +- lib/std/segmented_list.zig | 4 +- lib/std/sort.zig | 38 ++++---- lib/std/special/build_runner.zig | 4 +- lib/std/special/test_runner.zig | 4 +- lib/std/target.zig | 19 ++-- lib/std/testing.zig | 4 +- lib/std/thread.zig | 2 +- lib/std/zig/ast.zig | 16 +-- lib/std/zig/parse.zig | 2 +- lib/std/zig/string_literal.zig | 2 +- lib/std/zig/system.zig | 8 +- src-self-hosted/Module.zig | 10 +- src-self-hosted/codegen.zig | 21 ++-- src-self-hosted/dep_tokenizer.zig | 26 ++--- src-self-hosted/ir.zig | 2 +- src-self-hosted/libc_installation.zig | 4 +- src-self-hosted/link.zig | 8 +- src-self-hosted/liveness.zig | 2 +- src-self-hosted/main.zig | 2 +- src-self-hosted/print_targets.zig | 2 +- src-self-hosted/translate_c.zig | 18 ++-- src-self-hosted/type.zig | 3 +- src-self-hosted/value.zig | 2 
+- src-self-hosted/zir.zig | 13 ++- test/stage1/behavior/async_fn.zig | 10 +- test/stage1/behavior/bitcast.zig | 4 +- test/stage1/behavior/bugs/2114.zig | 2 +- test/stage1/behavior/bugs/3742.zig | 2 +- test/stage1/behavior/bugs/4328.zig | 8 +- test/stage1/behavior/bugs/4769_a.zig | 2 +- test/stage1/behavior/bugs/4769_b.zig | 2 +- test/stage1/behavior/byval_arg_var.zig | 4 +- test/stage1/behavior/call.zig | 2 +- test/stage1/behavior/enum.zig | 2 +- test/stage1/behavior/error.zig | 2 +- test/stage1/behavior/eval.zig | 9 +- test/stage1/behavior/fn.zig | 6 +- test/stage1/behavior/generics.zig | 8 +- test/stage1/behavior/optional.zig | 16 ++- test/stage1/behavior/struct.zig | 10 +- test/stage1/behavior/tuple.zig | 4 +- test/stage1/behavior/type_info.zig | 2 +- test/stage1/behavior/union.zig | 2 +- test/stage1/behavior/var_args.zig | 16 +-- test/stage1/behavior/vector.zig | 8 +- 151 files changed, 534 insertions(+), 527 deletions(-) (limited to 'lib/std/debug.zig') diff --git a/build.zig b/build.zig index f9cd1b0dea..e4ed04ea16 100644 --- a/build.zig +++ b/build.zig @@ -153,7 +153,7 @@ pub fn build(b: *Builder) !void { test_step.dependOn(docs_step); } -fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void { +fn dependOnLib(b: *Builder, lib_exe_obj: anytype, dep: LibraryDep) void { for (dep.libdirs.items) |lib_dir| { lib_exe_obj.addLibPath(lib_dir); } @@ -193,7 +193,7 @@ fn fileExists(filename: []const u8) !bool { return true; } -fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void { +fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void { lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{ cmake_binary_dir, "zig_cpp", @@ -275,7 +275,7 @@ fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep { return result; } -fn configureStage2(b: *Builder, exe: var, ctx: Context) !void { +fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void { exe.addIncludeDir("src"); exe.addIncludeDir(ctx.cmake_binary_dir); addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp"); @@ -340,7 +340,7 @@ fn configureStage2(b: *Builder, exe: var, ctx: Context) !void { fn addCxxKnownPath( b: *Builder, ctx: Context, - exe: var, + exe: anytype, objname: []const u8, errtxt: ?[]const u8, ) !void { diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index d667bc4d17..4d8cdc200c 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -53,7 +53,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Deprecated: use `items` field directly. /// Return contents as a slice. Only valid while the list /// doesn't change size. - pub fn span(self: var) @TypeOf(self.items) { + pub fn span(self: anytype) @TypeOf(self.items) { return self.items; } diff --git a/lib/std/array_list_sentineled.zig b/lib/std/array_list_sentineled.zig index b83cc4ad62..828be7462f 100644 --- a/lib/std/array_list_sentineled.zig +++ b/lib/std/array_list_sentineled.zig @@ -69,7 +69,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type { } /// Only works when `T` is `u8`. 
- pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: var) !Self { + pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: anytype) !Self { const size = std.math.cast(usize, std.fmt.count(format, args)) catch |err| switch (err) { error.Overflow => return error.OutOfMemory, }; @@ -82,7 +82,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type { self.list.deinit(); } - pub fn span(self: var) @TypeOf(self.list.items[0..:sentinel]) { + pub fn span(self: anytype) @TypeOf(self.list.items[0..:sentinel]) { return self.list.items[0..self.len() :sentinel]; } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index d6d0b70754..880af37ef4 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -123,10 +123,10 @@ pub fn Queue(comptime T: type) type { /// Dumps the contents of the queue to `stream`. /// Up to 4 elements from the head are dumped and the tail of the queue is /// dumped as well. - pub fn dumpToStream(self: *Self, stream: var) !void { + pub fn dumpToStream(self: *Self, stream: anytype) !void { const S = struct { fn dumpRecursive( - s: var, + s: anytype, optional_node: ?*Node, indent: usize, comptime depth: comptime_int, diff --git a/lib/std/build.zig b/lib/std/build.zig index 2d5ec4bd91..19de76b00d 100644 --- a/lib/std/build.zig +++ b/lib/std/build.zig @@ -312,7 +312,7 @@ pub const Builder = struct { return write_file_step; } - pub fn addLog(self: *Builder, comptime format: []const u8, args: var) *LogStep { + pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep { const data = self.fmt(format, args); const log_step = self.allocator.create(LogStep) catch unreachable; log_step.* = LogStep.init(self, data); @@ -883,7 +883,7 @@ pub const Builder = struct { return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable; } - pub fn fmt(self: *Builder, comptime format: []const u8, args: var) []u8 { + pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 { return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable; } diff --git a/lib/std/build/emit_raw.zig b/lib/std/build/emit_raw.zig index 746b0ac91b..058a4a64ff 100644 --- a/lib/std/build/emit_raw.zig +++ b/lib/std/build/emit_raw.zig @@ -126,7 +126,7 @@ const BinaryElfOutput = struct { return segment.p_offset <= section.elfOffset and (segment.p_offset + segment.p_filesz) >= (section.elfOffset + section.fileSize); } - fn sectionValidForOutput(shdr: var) bool { + fn sectionValidForOutput(shdr: anytype) bool { return shdr.sh_size > 0 and shdr.sh_type != elf.SHT_NOBITS and ((shdr.sh_flags & elf.SHF_ALLOC) == elf.SHF_ALLOC); } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 0c7e534bed..5eafc4e409 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -198,7 +198,7 @@ pub const TypeInfo = union(enum) { /// The type of the sentinel is the element type of the pointer, which is /// the value of the `child` field in this struct. However there is no way /// to refer to that type here, so we use `var`. - sentinel: var, + sentinel: anytype, /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. @@ -220,7 +220,7 @@ pub const TypeInfo = union(enum) { /// The type of the sentinel is the element type of the array, which is /// the value of the `child` field in this struct. However there is no way /// to refer to that type here, so we use `var`. 
- sentinel: var, + sentinel: anytype, }; /// This data structure is used by the Zig language code generation and @@ -237,7 +237,7 @@ pub const TypeInfo = union(enum) { name: []const u8, offset: ?comptime_int, field_type: type, - default_value: var, + default_value: anytype, }; /// This data structure is used by the Zig language code generation and @@ -328,7 +328,7 @@ pub const TypeInfo = union(enum) { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Frame = struct { - function: var, + function: anytype, }; /// This data structure is used by the Zig language code generation and @@ -452,7 +452,7 @@ pub const Version = struct { self: Version, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { if (fmt.len == 0) { if (self.patch == 0) { diff --git a/lib/std/c.zig b/lib/std/c.zig index 28d4157d6a..e483b5b50f 100644 --- a/lib/std/c.zig +++ b/lib/std/c.zig @@ -27,7 +27,7 @@ pub usingnamespace switch (std.Target.current.os.tag) { else => struct {}, }; -pub fn getErrno(rc: var) u16 { +pub fn getErrno(rc: anytype) u16 { if (rc == -1) { return @intCast(u16, _errno().*); } else { diff --git a/lib/std/c/ast.zig b/lib/std/c/ast.zig index bb8c01f138..467050d57d 100644 --- a/lib/std/c/ast.zig +++ b/lib/std/c/ast.zig @@ -64,7 +64,7 @@ pub const Error = union(enum) { NothingDeclared: SimpleError("declaration doesn't declare anything"), QualifierIgnored: SingleTokenError("qualifier '{}' ignored"), - pub fn render(self: *const Error, tree: *Tree, stream: var) !void { + pub fn render(self: *const Error, tree: *Tree, stream: anytype) !void { switch (self.*) { .InvalidToken => |*x| return x.render(tree, stream), .ExpectedToken => |*x| return x.render(tree, stream), @@ -114,7 +114,7 @@ pub const Error = union(enum) { token: TokenIndex, expected_id: @TagType(Token.Id), - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { const found_token = tree.tokens.at(self.token); if (found_token.id == .Invalid) { return stream.print("expected '{}', found invalid bytes", .{self.expected_id.symbol()}); @@ -129,7 +129,7 @@ pub const Error = union(enum) { token: TokenIndex, type_spec: *Node.TypeSpec, - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { try stream.write("invalid type specifier '"); try type_spec.spec.print(tree, stream); const token_name = tree.tokens.at(self.token).id.symbol(); @@ -141,7 +141,7 @@ pub const Error = union(enum) { kw: TokenIndex, name: TokenIndex, - pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void { + pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void { return stream.print("must use '{}' tag to refer to type '{}'", .{ tree.slice(kw), tree.slice(name) }); } }; @@ -150,7 +150,7 @@ pub const Error = union(enum) { return struct { token: TokenIndex, - pub fn render(self: *const @This(), tree: *Tree, stream: var) !void { + pub fn render(self: *const @This(), tree: *Tree, stream: anytype) !void { const actual_token = tree.tokens.at(self.token); return stream.print(msg, .{actual_token.id.symbol()}); } @@ -163,7 +163,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void { + pub fn render(self: *const 
ThisError, tokens: *Tree.TokenList, stream: anytype) !void { return stream.write(msg); } }; @@ -317,7 +317,7 @@ pub const Node = struct { sym_type: *Type, }, - pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: var) !void { + pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: anytype) !void { switch (self.spec) { .None => unreachable, .Void => |index| try stream.write(tree.slice(index)), diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig index 257f407826..acaa5edc8d 100644 --- a/lib/std/cache_hash.zig +++ b/lib/std/cache_hash.zig @@ -70,7 +70,7 @@ pub const CacheHash = struct { /// Convert the input value into bytes and record it as a dependency of the /// process being cached - pub fn add(self: *CacheHash, val: var) void { + pub fn add(self: *CacheHash, val: anytype) void { assert(self.manifest_file == null); const valPtr = switch (@typeInfo(@TypeOf(val))) { diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig index 3021f6bc1e..8cc5cac130 100644 --- a/lib/std/comptime_string_map.zig +++ b/lib/std/comptime_string_map.zig @@ -8,7 +8,7 @@ const mem = std.mem; /// `kvs` expects a list literal containing list literals or an array/slice of structs /// where `.@"0"` is the `[]const u8` key and `.@"1"` is the associated value of type `V`. /// TODO: https://github.com/ziglang/zig/issues/4335 -pub fn ComptimeStringMap(comptime V: type, comptime kvs: var) type { +pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type { const precomputed = comptime blk: { @setEvalBranchQuota(2000); const KV = struct { @@ -126,7 +126,7 @@ test "ComptimeStringMap slice of structs" { testMap(map); } -fn testMap(comptime map: var) void { +fn testMap(comptime map: anytype) void { std.testing.expectEqual(TestEnum.A, map.get("have").?); std.testing.expectEqual(TestEnum.B, map.get("nothing").?); std.testing.expect(null == map.get("missing")); @@ -165,7 +165,7 @@ test "ComptimeStringMap void value type, list literal of list literals" { testSet(map); } -fn testSet(comptime map: var) void { +fn testSet(comptime map: anytype) void { std.testing.expectEqual({}, map.get("have").?); std.testing.expectEqual({}, map.get("nothing").?); std.testing.expect(null == map.get("missing")); diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index 8c5d75f80a..f0f40bd231 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -29,7 +29,7 @@ const hashes = [_]Crypto{ Crypto{ .ty = crypto.Blake3, .name = "blake3" }, }; -pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 { +pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 { var h = Hash.init(); var block: [Hash.digest_length]u8 = undefined; @@ -56,7 +56,7 @@ const macs = [_]Crypto{ Crypto{ .ty = crypto.HmacSha256, .name = "hmac-sha256" }, }; -pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 { +pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 { std.debug.assert(32 >= Mac.mac_length and 32 >= Mac.minimum_key_length); var in: [1 * MiB]u8 = undefined; @@ -81,7 +81,7 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 { const exchanges = [_]Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }}; -pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count: comptime_int) !u64 { +pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_count: comptime_int) !u64 { 
std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length); var in: [DhKeyExchange.minimum_key_length]u8 = undefined; @@ -166,21 +166,21 @@ pub fn main() !void { inline for (hashes) |H| { if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) { const throughput = try benchmarkHash(H.ty, mode(32 * MiB)); - try stdout.print("{:>11}: {:5} MiB/s\n", .{H.name, throughput / (1 * MiB)}); + try stdout.print("{:>11}: {:5} MiB/s\n", .{ H.name, throughput / (1 * MiB) }); } } inline for (macs) |M| { if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) { const throughput = try benchmarkMac(M.ty, mode(128 * MiB)); - try stdout.print("{:>11}: {:5} MiB/s\n", .{M.name, throughput / (1 * MiB)}); + try stdout.print("{:>11}: {:5} MiB/s\n", .{ M.name, throughput / (1 * MiB) }); } } inline for (exchanges) |E| { if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) { const throughput = try benchmarkKeyExchange(E.ty, mode(1000)); - try stdout.print("{:>11}: {:5} exchanges/s\n", .{E.name, throughput}); + try stdout.print("{:>11}: {:5} exchanges/s\n", .{ E.name, throughput }); } } } diff --git a/lib/std/crypto/test.zig b/lib/std/crypto/test.zig index 1ff326cf39..61260c7e39 100644 --- a/lib/std/crypto/test.zig +++ b/lib/std/crypto/test.zig @@ -4,7 +4,7 @@ const mem = std.mem; const fmt = std.fmt; // Hash using the specified hasher `H` asserting `expected == H(input)`. -pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, input: []const u8) void { +pub fn assertEqualHash(comptime Hasher: anytype, comptime expected: []const u8, input: []const u8) void { var h: [expected.len / 2]u8 = undefined; Hasher.hash(input, h[0..]); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index e6d0c17da4..3346598ab7 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -58,7 +58,7 @@ pub const warn = print; /// Print to stderr, unbuffered, and silently returning on failure. Intended /// for use in "printf debugging." Use `std.log` functions for proper logging. -pub fn print(comptime fmt: []const u8, args: var) void { +pub fn print(comptime fmt: []const u8, args: anytype) void { const held = stderr_mutex.acquire(); defer held.release(); const stderr = io.getStdErr().writer(); @@ -223,7 +223,7 @@ pub fn assert(ok: bool) void { if (!ok) unreachable; // assertion failure } -pub fn panic(comptime format: []const u8, args: var) noreturn { +pub fn panic(comptime format: []const u8, args: anytype) noreturn { @setCold(true); // TODO: remove conditional once wasi / LLVM defines __builtin_return_address const first_trace_addr = if (builtin.os.tag == .wasi) null else @returnAddress(); @@ -241,7 +241,7 @@ var panic_mutex = std.Mutex.init(); /// This is used to catch and handle panics triggered by the panic handler. 
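The other mechanical change running through these hunks is renaming generic parameters from `var` to `anytype`. A before/after sketch on a hypothetical helper (not from the tree); only the parameter spelling changes, the body stays the same:

    const std = @import("std");

    // Before this change the generic writer/value parameters were spelled `var`:
    //     fn dump(writer: var, value: var) !void { ... }
    // Afterwards the same signature uses `anytype`:
    fn dump(writer: anytype, value: anytype) !void {
        try writer.print("{}\n", .{value});
    }

    test "anytype parameters (illustrative)" {
        var buf: [32]u8 = undefined;
        var fbs = std.io.fixedBufferStream(buf[0..]);
        try dump(fbs.writer(), @as(u32, 42));
        std.testing.expect(std.mem.eql(u8, fbs.getWritten(), "42\n"));
    }
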
threadlocal var panic_stage: usize = 0; -pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: var) noreturn { +pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn { @setCold(true); if (enable_segfault_handler) { @@ -306,7 +306,7 @@ const RESET = "\x1b[0m"; pub fn writeStackTrace( stack_trace: builtin.StackTrace, - out_stream: var, + out_stream: anytype, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_config: TTY.Config, @@ -384,7 +384,7 @@ pub const StackIterator = struct { }; pub fn writeCurrentStackTrace( - out_stream: var, + out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, @@ -399,7 +399,7 @@ pub fn writeCurrentStackTrace( } pub fn writeCurrentStackTraceWindows( - out_stream: var, + out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, @@ -435,7 +435,7 @@ pub const TTY = struct { // TODO give this a payload of file handle windows_api, - fn setColor(conf: Config, out_stream: var, color: Color) void { + fn setColor(conf: Config, out_stream: anytype, color: Color) void { nosuspend switch (conf) { .no_color => return, .escape_codes => switch (color) { @@ -555,7 +555,7 @@ fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const Mach } /// TODO resources https://github.com/ziglang/zig/issues/4353 -pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_config: TTY.Config) !void { +pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void { const module = debug_info.getModuleForAddress(address) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => { return printLineInfo( @@ -586,13 +586,13 @@ pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: us } fn printLineInfo( - out_stream: var, + out_stream: anytype, line_info: ?LineInfo, address: usize, symbol_name: []const u8, compile_unit_name: []const u8, tty_config: TTY.Config, - comptime printLineFromFile: var, + comptime printLineFromFile: anytype, ) !void { nosuspend { tty_config.setColor(out_stream, .White); @@ -820,7 +820,7 @@ fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInf } } -fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize { +fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]usize { const num_words = try stream.readIntLittle(u32); var word_i: usize = 0; var list = ArrayList(usize).init(allocator); @@ -1004,7 +1004,7 @@ fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugI }; } -fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void { +fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void { // Need this to always block even in async I/O mode, because this could potentially // be called from e.g. the event loop code crashing. var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking }); diff --git a/lib/std/debug/leb128.zig b/lib/std/debug/leb128.zig index 16a23da123..8149554246 100644 --- a/lib/std/debug/leb128.zig +++ b/lib/std/debug/leb128.zig @@ -3,7 +3,7 @@ const testing = std.testing; /// Read a single unsigned LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. 
-pub fn readULEB128(comptime T: type, reader: var) !T { +pub fn readULEB128(comptime T: type, reader: anytype) !T { const U = if (T.bit_count < 8) u8 else T; const ShiftT = std.math.Log2Int(U); @@ -33,7 +33,7 @@ pub fn readULEB128(comptime T: type, reader: var) !T { } /// Write a single unsigned integer as unsigned LEB128 to the given writer. -pub fn writeULEB128(writer: var, uint_value: var) !void { +pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); const U = if (T.bit_count < 8) u8 else T; var value = @intCast(U, uint_value); @@ -61,7 +61,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T { /// Write a single unsigned LEB128 integer to the given memory as unsigned LEB128, /// returning the number of bytes written. -pub fn writeULEB128Mem(ptr: []u8, uint_value: var) !usize { +pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize { const T = @TypeOf(uint_value); const max_group = (T.bit_count + 6) / 7; var buf = std.io.fixedBufferStream(ptr); @@ -71,7 +71,7 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: var) !usize { /// Read a single signed LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. -pub fn readILEB128(comptime T: type, reader: var) !T { +pub fn readILEB128(comptime T: type, reader: anytype) !T { const S = if (T.bit_count < 8) i8 else T; const U = std.meta.Int(false, S.bit_count); const ShiftU = std.math.Log2Int(U); @@ -120,7 +120,7 @@ pub fn readILEB128(comptime T: type, reader: var) !T { } /// Write a single signed integer as signed LEB128 to the given writer. -pub fn writeILEB128(writer: var, int_value: var) !void { +pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const T = @TypeOf(int_value); const S = if (T.bit_count < 8) i8 else T; const U = std.meta.Int(false, S.bit_count); @@ -152,7 +152,7 @@ pub fn readILEB128Mem(comptime T: type, ptr: *[]const u8) !T { /// Write a single signed LEB128 integer to the given memory as unsigned LEB128, /// returning the number of bytes written. 
-pub fn writeILEB128Mem(ptr: []u8, int_value: var) !usize { +pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize { const T = @TypeOf(int_value); var buf = std.io.fixedBufferStream(ptr); try writeILEB128(buf.writer(), int_value); @@ -295,7 +295,7 @@ test "deserialize unsigned LEB128" { try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00"); } -fn test_write_leb128(value: var) !void { +fn test_write_leb128(value: anytype) !void { const T = @TypeOf(value); const writeStream = if (T.is_signed) writeILEB128 else writeULEB128; diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 24792c7ca0..1400442247 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -236,7 +236,7 @@ const LineNumberProgram = struct { } }; -fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 { +fn readUnitLength(in_stream: anytype, endian: builtin.Endian, is_64: *bool) !u64 { const first_32_bits = try in_stream.readInt(u32, endian); is_64.* = (first_32_bits == 0xffffffff); if (is_64.*) { @@ -249,7 +249,7 @@ fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 { } // TODO the nosuspends here are workarounds -fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 { +fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 { const buf = try allocator.alloc(u8, size); errdefer allocator.free(buf); if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile; @@ -257,25 +257,25 @@ fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 } // TODO the nosuspends here are workarounds -fn readAddress(in_stream: var, endian: builtin.Endian, is_64: bool) !u64 { +fn readAddress(in_stream: anytype, endian: builtin.Endian, is_64: bool) !u64 { return nosuspend if (is_64) try in_stream.readInt(u64, endian) else @as(u64, try in_stream.readInt(u32, endian)); } -fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue { +fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Block = buf }; } // TODO the nosuspends here are workarounds -fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: usize) !FormValue { +fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: usize) !FormValue { const block_len = try nosuspend in_stream.readVarInt(usize, endian, size); return parseFormValueBlockLen(allocator, in_stream, block_len); } -fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue { +fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue { // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here. // `nosuspend` should be removed from all the function calls once it is fixed. 
return FormValue{ @@ -302,7 +302,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: boo } // TODO the nosuspends here are workarounds -fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: i32) !FormValue { +fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue { return FormValue{ .Ref = switch (size) { 1 => try nosuspend in_stream.readInt(u8, endian), @@ -316,7 +316,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin. } // TODO the nosuspends here are workarounds -fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue { +fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue { return switch (form_id) { FORM_addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) }, FORM_block1 => parseFormValueBlock(allocator, in_stream, endian, 1), @@ -670,7 +670,7 @@ pub const DwarfInfo = struct { } } - fn parseDie(di: *DwarfInfo, in_stream: var, abbrev_table: *const AbbrevTable, is_64: bool) !?Die { + fn parseDie(di: *DwarfInfo, in_stream: anytype, abbrev_table: *const AbbrevTable, is_64: bool) !?Die { const abbrev_code = try leb.readULEB128(u64, in_stream); if (abbrev_code == 0) return null; const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo; diff --git a/lib/std/elf.zig b/lib/std/elf.zig index b6609d8b31..98508df190 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -517,7 +517,7 @@ pub fn readAllHeaders(allocator: *mem.Allocator, file: File) !AllHeaders { return hdrs; } -pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) { +pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(@TypeOf(int_64), int_64); @@ -529,7 +529,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_ } } -pub fn int32(need_bswap: bool, int_32: var, comptime Int64: var) Int64 { +pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 { if (need_bswap) { return @byteSwap(@TypeOf(int_32), int_32); } else { diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig index 61130b32cb..0dc6550218 100644 --- a/lib/std/event/group.zig +++ b/lib/std/event/group.zig @@ -65,7 +65,7 @@ pub fn Group(comptime ReturnType: type) type { /// allocated by the group and freed by `wait`. /// `func` must be async and have return type `ReturnType`. /// Thread-safe. - pub fn call(self: *Self, comptime func: var, args: var) error{OutOfMemory}!void { + pub fn call(self: *Self, comptime func: anytype, args: anytype) error{OutOfMemory}!void { var frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args))); errdefer self.allocator.destroy(frame); const node = try self.allocator.create(AllocStack.Node); diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 2674ba485a..7415b1b520 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -76,9 +76,9 @@ fn peekIsAlign(comptime fmt: []const u8) bool { /// /// A user type may be a `struct`, `vector`, `union` or `enum` type. 
pub fn format( - writer: var, + writer: anytype, comptime fmt: []const u8, - args: var, + args: anytype, ) !void { const ArgSetType = u32; if (@typeInfo(@TypeOf(args)) != .Struct) { @@ -311,10 +311,10 @@ pub fn format( } pub fn formatType( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, max_depth: usize, ) @TypeOf(writer).Error!void { if (comptime std.mem.eql(u8, fmt, "*")) { @@ -490,10 +490,10 @@ pub fn formatType( } fn formatValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "B")) { return formatBytes(value, options, 1000, writer); @@ -511,10 +511,10 @@ fn formatValue( } pub fn formatIntValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { comptime var radix = 10; comptime var uppercase = false; @@ -551,10 +551,10 @@ pub fn formatIntValue( } fn formatFloatValue( - value: var, + value: anytype, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) { return formatFloatScientific(value, options, writer); @@ -569,7 +569,7 @@ pub fn formatText( bytes: []const u8, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "s") or (fmt.len == 0)) { return formatBuf(bytes, options, writer); @@ -586,7 +586,7 @@ pub fn formatText( pub fn formatAsciiChar( c: u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { return writer.writeAll(@as(*const [1]u8, &c)); } @@ -594,7 +594,7 @@ pub fn formatAsciiChar( pub fn formatBuf( buf: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const width = options.width orelse buf.len; var padding = if (width > buf.len) (width - buf.len) else 0; @@ -626,9 +626,9 @@ pub fn formatBuf( // It should be the case that every full precision, printed value can be re-parsed back to the // same type unambiguously. pub fn formatFloatScientific( - value: var, + value: anytype, options: FormatOptions, - writer: var, + writer: anytype, ) !void { var x = @floatCast(f64, value); @@ -719,9 +719,9 @@ pub fn formatFloatScientific( // Print a float of the format x.yyyyy where the number of y is specified by the precision argument. // By default floats are printed at full precision (no rounding). 
pub fn formatFloatDecimal( - value: var, + value: anytype, options: FormatOptions, - writer: var, + writer: anytype, ) !void { var x = @as(f64, value); @@ -860,10 +860,10 @@ pub fn formatFloatDecimal( } pub fn formatBytes( - value: var, + value: anytype, options: FormatOptions, comptime radix: usize, - writer: var, + writer: anytype, ) !void { if (value == 0) { return writer.writeAll("0B"); @@ -901,11 +901,11 @@ pub fn formatBytes( } pub fn formatInt( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const int_value = if (@TypeOf(value) == comptime_int) blk: { const Int = math.IntFittingRange(value, value); @@ -921,11 +921,11 @@ pub fn formatInt( } fn formatIntSigned( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { const new_options = FormatOptions{ .width = if (options.width) |w| (if (w == 0) 0 else w - 1) else null, @@ -948,11 +948,11 @@ fn formatIntSigned( } fn formatIntUnsigned( - value: var, + value: anytype, base: u8, uppercase: bool, options: FormatOptions, - writer: var, + writer: anytype, ) !void { assert(base >= 2); var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined; @@ -990,7 +990,7 @@ fn formatIntUnsigned( } } -pub fn formatIntBuf(out_buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) usize { +pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) usize { var fbs = std.io.fixedBufferStream(out_buf); formatInt(value, base, uppercase, options, fbs.writer()) catch unreachable; return fbs.pos; @@ -1050,7 +1050,7 @@ fn parseWithSign( .Pos => math.add, .Neg => math.sub, }; - + var x: T = 0; for (buf) |c| { @@ -1132,14 +1132,14 @@ pub const BufPrintError = error{ /// As much as possible was written to the buffer, but it was too small to fit all the printed bytes. NoSpaceLeft, }; -pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: var) BufPrintError![]u8 { +pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![]u8 { var fbs = std.io.fixedBufferStream(buf); try format(fbs.writer(), fmt, args); return fbs.getWritten(); } // Count the characters needed for format. Useful for preallocating memory -pub fn count(comptime fmt: []const u8, args: var) u64 { +pub fn count(comptime fmt: []const u8, args: anytype) u64 { var counting_writer = std.io.countingWriter(std.io.null_writer); format(counting_writer.writer(), fmt, args) catch |err| switch (err) {}; return counting_writer.bytes_written; @@ -1147,7 +1147,7 @@ pub fn count(comptime fmt: []const u8, args: var) u64 { pub const AllocPrintError = error{OutOfMemory}; -pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![]u8 { +pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 { const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) { // Output too long. Can't possibly allocate enough memory to display it. error.Overflow => return error.OutOfMemory, @@ -1158,7 +1158,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: var }; } -pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: var) AllocPrintError![:0]u8 { +pub fn allocPrint0(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 { const result = try allocPrint(allocator, fmt ++ "\x00", args); return result[0 .. 
result.len - 1 :0]; } @@ -1184,7 +1184,7 @@ test "bufPrintInt" { std.testing.expectEqualSlices(u8, "-42", bufPrintIntToSlice(buf, @as(i32, -42), 10, false, FormatOptions{ .width = 3 })); } -fn bufPrintIntToSlice(buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) []u8 { +fn bufPrintIntToSlice(buf: []u8, value: anytype, base: u8, uppercase: bool, options: FormatOptions) []u8 { return buf[0..formatIntBuf(buf, value, base, uppercase, options)]; } @@ -1452,7 +1452,7 @@ test "custom" { self: SelfType, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); @@ -1573,7 +1573,7 @@ test "bytes.hex" { try testFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros}); } -fn testFmt(expected: []const u8, comptime template: []const u8, args: var) !void { +fn testFmt(expected: []const u8, comptime template: []const u8, args: anytype) !void { var buf: [100]u8 = undefined; const result = try bufPrint(buf[0..], template, args); if (mem.eql(u8, result, expected)) return; @@ -1669,7 +1669,7 @@ test "formatType max_depth" { self: SelfType, comptime fmt: []const u8, options: FormatOptions, - writer: var, + writer: anytype, ) !void { if (fmt.len == 0) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index db4317064d..4a897c78f7 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -29,7 +29,7 @@ pub const PreopenType = union(PreopenTypeTag) { } } - pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: var) !void { + pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void { try out_stream.print("PreopenType{{ ", .{}); switch (self) { PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}), diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index a33b23354b..a3e1a390c2 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -21,7 +21,7 @@ pub const HashStrategy = enum { }; /// Helper function to hash a pointer and mutate the strategy if needed. -pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { const info = @typeInfo(@TypeOf(key)); switch (info.Pointer.size) { @@ -53,7 +53,7 @@ pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void { } /// Helper function to hash a set of contiguous objects, from an array or slice. -pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hashArray(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { switch (strat) { .Shallow => { // TODO detect via a trait when Key has no padding bits to @@ -73,7 +73,7 @@ pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void { /// Provides generic hashing for any eligible type. /// Strategy is provided to determine if pointers should be followed or not. -pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { +pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { const Key = @TypeOf(key); switch (@typeInfo(Key)) { .NoReturn, @@ -161,7 +161,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void { /// Provides generic hashing for any eligible type. 
/// Only hashes `key` itself, pointers are not followed. /// Slices are rejected to avoid ambiguity on the user's intention. -pub fn autoHash(hasher: var, key: var) void { +pub fn autoHash(hasher: anytype, key: anytype) void { const Key = @TypeOf(key); if (comptime meta.trait.isSlice(Key)) { comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated @@ -181,28 +181,28 @@ pub fn autoHash(hasher: var, key: var) void { const testing = std.testing; const Wyhash = std.hash.Wyhash; -fn testHash(key: var) u64 { +fn testHash(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Shallow); return hasher.final(); } -fn testHashShallow(key: var) u64 { +fn testHashShallow(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Shallow); return hasher.final(); } -fn testHashDeep(key: var) u64 { +fn testHashDeep(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .Deep); return hasher.final(); } -fn testHashDeepRecursive(key: var) u64 { +fn testHashDeepRecursive(key: anytype) u64 { // Any hash could be used here, for testing autoHash. var hasher = Wyhash.init(0); hash(&hasher, key, .DeepRecursive); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 0eb3a25fe1..5f8a15831c 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -88,7 +88,7 @@ const Result = struct { const block_size: usize = 8 * 8192; -pub fn benchmarkHash(comptime H: var, bytes: usize) !Result { +pub fn benchmarkHash(comptime H: anytype, bytes: usize) !Result { var h = blk: { if (H.init_u8s) |init| { break :blk H.ty.init(init); @@ -119,7 +119,7 @@ pub fn benchmarkHash(comptime H: var, bytes: usize) !Result { }; } -pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result { +pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize) !Result { const key_count = bytes / key_size; var block: [block_size]u8 = undefined; prng.random.bytes(block[0..]); diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig index a717303090..73b94acbd2 100644 --- a/lib/std/hash/cityhash.zig +++ b/lib/std/hash/cityhash.zig @@ -354,7 +354,7 @@ pub const CityHash64 = struct { } }; -fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 { +fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { const hashbytes = hashbits / 8; var key: [256]u8 = undefined; var hashes: [hashbytes * 256]u8 = undefined; diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig index 96efc8b9c1..effa13ad69 100644 --- a/lib/std/hash/murmur.zig +++ b/lib/std/hash/murmur.zig @@ -279,7 +279,7 @@ pub const Murmur3_32 = struct { } }; -fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 { +fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 { const hashbytes = hashbits / 8; var key: [256]u8 = undefined; var hashes: [hashbytes * 256]u8 = undefined; diff --git a/lib/std/heap.zig b/lib/std/heap.zig index ea9e95c675..ba96257557 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -15,15 +15,20 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator; const Allocator = mem.Allocator; -usingnamespace if (comptime @hasDecl(c, "malloc_size")) struct { - pub const supports_malloc_size = true; - pub const malloc_size = c.malloc_size; -} else if (comptime 
@hasDecl(c, "malloc_usable_size")) struct { - pub const supports_malloc_size = true; - pub const malloc_size = c.malloc_usable_size; -} else struct { - pub const supports_malloc_size = false; -}; +usingnamespace if (comptime @hasDecl(c, "malloc_size")) + struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_size; + } +else if (comptime @hasDecl(c, "malloc_usable_size")) + struct { + pub const supports_malloc_size = true; + pub const malloc_size = c.malloc_usable_size; + } +else + struct { + pub const supports_malloc_size = false; + }; pub const c_allocator = &c_allocator_state; var c_allocator_state = Allocator{ @@ -151,8 +156,7 @@ const PageAllocator = struct { } const maxDropLen = alignment - std.math.min(alignment, mem.page_size); - const allocLen = if (maxDropLen <= alignedLen - n) alignedLen - else mem.alignForward(alignedLen + maxDropLen, mem.page_size); + const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size); const slice = os.mmap( null, allocLen, @@ -331,8 +335,7 @@ const WasmPageAllocator = struct { fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 { const page_count = nPages(len); const page_idx = try allocPages(page_count, alignment); - return @intToPtr([*]u8, page_idx * mem.page_size) - [0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; + return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; } fn allocPages(page_count: usize, alignment: u29) !usize { { @@ -452,7 +455,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize { const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); if (new_size == 0) { - os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void ,getRecordPtr(buf).*)); + os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); return 0; } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index b521515a79..d3055c75ee 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -40,7 +40,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { if (new_len == 0) { self.out_stream.print("free : {}\n", .{buf.len}) catch {}; } else if (new_len <= buf.len) { - self.out_stream.print("shrink: {} to {}\n", .{buf.len, new_len}) catch {}; + self.out_stream.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } @@ -60,7 +60,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { pub fn loggingAllocator( parent_allocator: *Allocator, - out_stream: var, + out_stream: anytype, ) LoggingAllocator(@TypeOf(out_stream)) { return LoggingAllocator(@TypeOf(out_stream)).init(parent_allocator, out_stream); } diff --git a/lib/std/http/headers.zig b/lib/std/http/headers.zig index 9310dac348..f5465d4151 100644 --- a/lib/std/http/headers.zig +++ b/lib/std/http/headers.zig @@ -348,7 +348,7 @@ pub const Headers = struct { self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { for (self.toSlice()) |entry| { try out_stream.writeAll(entry.name); diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig index d5e8ce934f..fbdf7fbe78 100644 --- a/lib/std/io/bit_reader.zig +++ 
b/lib/std/io/bit_reader.zig @@ -170,7 +170,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type { pub fn bitReader( comptime endian: builtin.Endian, - underlying_stream: var, + underlying_stream: anytype, ) BitReader(endian, @TypeOf(underlying_stream)) { return BitReader(endian, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig index bdf9156136..7c1d3e5dba 100644 --- a/lib/std/io/bit_writer.zig +++ b/lib/std/io/bit_writer.zig @@ -34,7 +34,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type { /// Write the specified number of bits to the stream from the least significant bits of /// the specified unsigned int value. Bits will only be written to the stream when there /// are enough to fill a byte. - pub fn writeBits(self: *Self, value: var, bits: usize) Error!void { + pub fn writeBits(self: *Self, value: anytype, bits: usize) Error!void { if (bits == 0) return; const U = @TypeOf(value); @@ -145,7 +145,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type { pub fn bitWriter( comptime endian: builtin.Endian, - underlying_stream: var, + underlying_stream: anytype, ) BitWriter(endian, @TypeOf(underlying_stream)) { return BitWriter(endian, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/buffered_reader.zig b/lib/std/io/buffered_reader.zig index f33dc127d2..73d74b465f 100644 --- a/lib/std/io/buffered_reader.zig +++ b/lib/std/io/buffered_reader.zig @@ -48,7 +48,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty }; } -pub fn bufferedReader(underlying_stream: var) BufferedReader(4096, @TypeOf(underlying_stream)) { +pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(underlying_stream)) { return .{ .unbuffered_reader = underlying_stream }; } diff --git a/lib/std/io/buffered_writer.zig b/lib/std/io/buffered_writer.zig index 5cd102b510..a970f899d6 100644 --- a/lib/std/io/buffered_writer.zig +++ b/lib/std/io/buffered_writer.zig @@ -43,6 +43,6 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty }; } -pub fn bufferedWriter(underlying_stream: var) BufferedWriter(4096, @TypeOf(underlying_stream)) { +pub fn bufferedWriter(underlying_stream: anytype) BufferedWriter(4096, @TypeOf(underlying_stream)) { return .{ .unbuffered_writer = underlying_stream }; } diff --git a/lib/std/io/counting_writer.zig b/lib/std/io/counting_writer.zig index 90e4580eea..c0cd53c7ee 100644 --- a/lib/std/io/counting_writer.zig +++ b/lib/std/io/counting_writer.zig @@ -32,7 +32,7 @@ pub fn CountingWriter(comptime WriterType: type) type { }; } -pub fn countingWriter(child_stream: var) CountingWriter(@TypeOf(child_stream)) { +pub fn countingWriter(child_stream: anytype) CountingWriter(@TypeOf(child_stream)) { return .{ .bytes_written = 0, .child_stream = child_stream }; } diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index ee5fe48ca5..32625f3b7a 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -127,7 +127,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { }; } -pub fn fixedBufferStream(buffer: var) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) { +pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) { return .{ .buffer = mem.span(buffer), .pos = 0 }; } diff --git a/lib/std/io/multi_writer.zig b/lib/std/io/multi_writer.zig index 
02ed75eaaa..e63940bff7 100644 --- a/lib/std/io/multi_writer.zig +++ b/lib/std/io/multi_writer.zig @@ -43,7 +43,7 @@ pub fn MultiWriter(comptime Writers: type) type { }; } -pub fn multiWriter(streams: var) MultiWriter(@TypeOf(streams)) { +pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) { return .{ .streams = streams }; } diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig index 2bf6b83bc5..08e940c6ec 100644 --- a/lib/std/io/peek_stream.zig +++ b/lib/std/io/peek_stream.zig @@ -80,7 +80,7 @@ pub fn PeekStream( pub fn peekStream( comptime lookahead: comptime_int, - underlying_stream: var, + underlying_stream: anytype, ) PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)) { return PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)).init(underlying_stream); } diff --git a/lib/std/io/serialization.zig b/lib/std/io/serialization.zig index 8c63b8b966..8fe0782c84 100644 --- a/lib/std/io/serialization.zig +++ b/lib/std/io/serialization.zig @@ -93,7 +93,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, } /// Deserializes data into the type pointed to by `ptr` - pub fn deserializeInto(self: *Self, ptr: var) !void { + pub fn deserializeInto(self: *Self, ptr: anytype) !void { const T = @TypeOf(ptr); comptime assert(trait.is(.Pointer)(T)); @@ -190,7 +190,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, pub fn deserializer( comptime endian: builtin.Endian, comptime packing: Packing, - in_stream: var, + in_stream: anytype, ) Deserializer(endian, packing, @TypeOf(in_stream)) { return Deserializer(endian, packing, @TypeOf(in_stream)).init(in_stream); } @@ -229,7 +229,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co if (packing == .Bit) return self.out_stream.flushBits(); } - fn serializeInt(self: *Self, value: var) Error!void { + fn serializeInt(self: *Self, value: anytype) Error!void { const T = @TypeOf(value); comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T)); @@ -261,7 +261,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co } /// Serializes the passed value into the stream - pub fn serialize(self: *Self, value: var) Error!void { + pub fn serialize(self: *Self, value: anytype) Error!void { const T = comptime @TypeOf(value); if (comptime trait.isIndexable(T)) { @@ -346,7 +346,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co pub fn serializer( comptime endian: builtin.Endian, comptime packing: Packing, - out_stream: var, + out_stream: anytype, ) Serializer(endian, packing, @TypeOf(out_stream)) { return Serializer(endian, packing, @TypeOf(out_stream)).init(out_stream); } @@ -462,7 +462,7 @@ test "Serializer/Deserializer Int: Inf/NaN" { try testIntSerializerDeserializerInfNaN(.Little, .Bit); } -fn testAlternateSerializer(self: var, _serializer: var) !void { +fn testAlternateSerializer(self: anytype, _serializer: anytype) !void { try _serializer.serialize(self.f_f16); } @@ -503,7 +503,7 @@ fn testSerializerDeserializer(comptime endian: builtin.Endian, comptime packing: f_f16: f16, f_unused_u32: u32, - pub fn deserialize(self: *@This(), _deserializer: var) !void { + pub fn deserialize(self: *@This(), _deserializer: anytype) !void { try _deserializer.deserializeInto(&self.f_f16); self.f_unused_u32 = 47; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index 659ba2703e..a98e3b1acd 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -24,7 +24,7 
@@ pub fn Writer( } } - pub fn print(self: Self, comptime format: []const u8, args: var) Error!void { + pub fn print(self: Self, comptime format: []const u8, args: anytype) Error!void { return std.fmt.format(self, format, args); } diff --git a/lib/std/json.zig b/lib/std/json.zig index 65ebe55072..f1b91fc829 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -239,7 +239,7 @@ pub const StreamingParser = struct { NullLiteral3, // Only call this function to generate array/object final state. - pub fn fromInt(x: var) State { + pub fn fromInt(x: anytype) State { debug.assert(x == 0 or x == 1); const T = @TagType(State); return @intToEnum(State, @intCast(T, x)); @@ -1236,7 +1236,7 @@ pub const Value = union(enum) { pub fn jsonStringify( value: @This(), options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { switch (value) { .Null => try stringify(null, options, out_stream), @@ -2338,7 +2338,7 @@ pub const StringifyOptions = struct { pub fn outputIndent( whitespace: @This(), - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { var char: u8 = undefined; var n_chars: usize = undefined; @@ -2380,7 +2380,7 @@ pub const StringifyOptions = struct { fn outputUnicodeEscape( codepoint: u21, - out_stream: var, + out_stream: anytype, ) !void { if (codepoint <= 0xFFFF) { // If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF), @@ -2402,9 +2402,9 @@ fn outputUnicodeEscape( } pub fn stringify( - value: var, + value: anytype, options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) @TypeOf(out_stream).Error!void { const T = @TypeOf(value); switch (@typeInfo(T)) { @@ -2584,7 +2584,7 @@ pub fn stringify( unreachable; } -fn teststringify(expected: []const u8, value: var, options: StringifyOptions) !void { +fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions) !void { const ValidationOutStream = struct { const Self = @This(); pub const OutStream = std.io.OutStream(*Self, Error, write); @@ -2758,7 +2758,7 @@ test "stringify struct with custom stringifier" { pub fn jsonStringify( value: Self, options: StringifyOptions, - out_stream: var, + out_stream: anytype, ) !void { try out_stream.writeAll("[\"something special\","); try stringify(42, options, out_stream); diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index dcfbf04bc1..778173cc24 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -152,7 +152,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { self: *Self, /// An integer, float, or `std.math.BigInt`. Emitted as a bare number if it fits losslessly /// in a IEEE 754 double float, otherwise emitted as a string to the full precision. 
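A caller-side sketch of `std.json.stringify` with the `anytype` out_stream, assuming a fixed-buffer writer; the `Point` struct and the expected output string are illustrative only.

const std = @import("std");

test "sketch: stringify to an anytype out_stream" {
    const Point = struct { x: i32, y: i32 };
    var buf: [32]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // `value` and `out_stream` are both anytype now.
    try std.json.stringify(
        Point{ .x = 1, .y = 2 },
        std.json.StringifyOptions{ .whitespace = null },
        fbs.writer(),
    );
    std.testing.expect(std.mem.eql(u8, fbs.getWritten(), "{\"x\":1,\"y\":2}"));
}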
- value: var, + value: anytype, ) !void { assert(self.state[self.state_index] == State.Value); switch (@typeInfo(@TypeOf(value))) { @@ -215,7 +215,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { self.state_index -= 1; } - fn stringify(self: *Self, value: var) !void { + fn stringify(self: *Self, value: anytype) !void { try std.json.stringify(value, std.json.StringifyOptions{ .whitespace = self.whitespace, }, self.stream); @@ -224,7 +224,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type { } pub fn writeStream( - out_stream: var, + out_stream: anytype, comptime max_depth: usize, ) WriteStream(@TypeOf(out_stream), max_depth) { return WriteStream(@TypeOf(out_stream), max_depth).init(out_stream); diff --git a/lib/std/log.zig b/lib/std/log.zig index 63aeaecf88..0006580031 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -101,7 +101,7 @@ fn log( comptime message_level: Level, comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(message_level) <= @enumToInt(level)) { if (@hasDecl(root, "log")) { @@ -120,7 +120,7 @@ fn log( pub fn emerg( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.emerg, scope, format, args); @@ -131,7 +131,7 @@ pub fn emerg( pub fn alert( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.alert, scope, format, args); @@ -143,7 +143,7 @@ pub fn alert( pub fn crit( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.crit, scope, format, args); @@ -154,7 +154,7 @@ pub fn crit( pub fn err( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { @setCold(true); log(.err, scope, format, args); @@ -166,7 +166,7 @@ pub fn err( pub fn warn( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.warn, scope, format, args); } @@ -176,7 +176,7 @@ pub fn warn( pub fn notice( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.notice, scope, format, args); } @@ -186,7 +186,7 @@ pub fn notice( pub fn info( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.info, scope, format, args); } @@ -196,7 +196,7 @@ pub fn info( pub fn debug( comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { log(.debug, scope, format, args); } diff --git a/lib/std/math.zig b/lib/std/math.zig index 14ffd61c29..111a618cef 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -104,7 +104,7 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool { } // TODO: Hide the following in an internal module. -pub fn forceEval(value: var) void { +pub fn forceEval(value: anytype) void { const T = @TypeOf(value); switch (T) { f16 => { @@ -259,7 +259,7 @@ pub fn Min(comptime A: type, comptime B: type) type { /// Returns the smaller number. When one of the parameter's type's full range fits in the other, /// the return type is the smaller type. 
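For the std/log.zig hunks above, a usage sketch; the `.sketch` scope and the messages are made up, and it assumes the default root-level log handler is in effect.

const std = @import("std");

pub fn main() void {
    // Every level function takes a comptime scope, a comptime format string, and anytype args.
    std.log.info(.sketch, "loaded {} items in {}ms", .{ 3, 17 });
    std.log.warn(.sketch, "falling back to defaults", .{});
}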
-pub fn min(x: var, y: var) Min(@TypeOf(x), @TypeOf(y)) { +pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { const Result = Min(@TypeOf(x), @TypeOf(y)); if (x < y) { // TODO Zig should allow this as an implicit cast because x is immutable and in this @@ -310,7 +310,7 @@ test "math.min" { } } -pub fn max(x: var, y: var) @TypeOf(x, y) { +pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { return if (x > y) x else y; } @@ -318,7 +318,7 @@ test "math.max" { testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); } -pub fn clamp(val: var, lower: var, upper: var) @TypeOf(val, lower, upper) { +pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); return max(lower, min(val, upper)); } @@ -354,7 +354,7 @@ pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) { return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer; } -pub fn negate(x: var) !@TypeOf(x) { +pub fn negate(x: anytype) !@TypeOf(x) { return sub(@TypeOf(x), 0, x); } @@ -365,7 +365,7 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T { /// Shifts left. Overflowed bits are truncated. /// A negative shift amount results in a right shift. -pub fn shl(comptime T: type, a: T, shift_amt: var) T { +pub fn shl(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); @@ -391,7 +391,7 @@ test "math.shl" { /// Shifts right. Overflowed bits are truncated. /// A negative shift amount results in a left shift. -pub fn shr(comptime T: type, a: T, shift_amt: var) T { +pub fn shr(comptime T: type, a: T, shift_amt: anytype) T { const abs_shift_amt = absCast(shift_amt); const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt); @@ -419,7 +419,7 @@ test "math.shr" { /// Rotates right. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. -pub fn rotr(comptime T: type, x: T, r: var) T { +pub fn rotr(comptime T: type, x: T, r: anytype) T { if (T.is_signed) { @compileError("cannot rotate signed integer"); } else { @@ -438,7 +438,7 @@ test "math.rotr" { /// Rotates left. Only unsigned values can be rotated. /// Negative shift values results in shift modulo the bit count. -pub fn rotl(comptime T: type, x: T, r: var) T { +pub fn rotl(comptime T: type, x: T, r: anytype) T { if (T.is_signed) { @compileError("cannot rotate signed integer"); } else { @@ -541,7 +541,7 @@ fn testOverflow() void { testing.expect((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000); } -pub fn absInt(x: var) !@TypeOf(x) { +pub fn absInt(x: anytype) !@TypeOf(x) { const T = @TypeOf(x); comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt comptime assert(T.is_signed); // must pass a signed integer to absInt @@ -689,7 +689,7 @@ fn testRem() void { /// Returns the absolute value of the integer parameter. /// Result is an unsigned integer. -pub fn absCast(x: var) switch (@typeInfo(@TypeOf(x))) { +pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) { .ComptimeInt => comptime_int, .Int => |intInfo| std.meta.Int(false, intInfo.bits), else => @compileError("absCast only accepts integers"), @@ -724,7 +724,7 @@ test "math.absCast" { /// Returns the negation of the integer parameter. /// Result is a signed integer. 
-pub fn negateCast(x: var) !std.meta.Int(true, @TypeOf(x).bit_count) { +pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) { if (@TypeOf(x).is_signed) return negate(x); const int = std.meta.Int(true, @TypeOf(x).bit_count); @@ -747,7 +747,7 @@ test "math.negateCast" { /// Cast an integer to a different integer type. If the value doesn't fit, /// return an error. -pub fn cast(comptime T: type, x: var) (error{Overflow}!T) { +pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) { comptime assert(@typeInfo(T) == .Int); // must pass an integer comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) { @@ -772,7 +772,7 @@ test "math.cast" { pub const AlignCastError = error{UnalignedMemory}; /// Align cast a pointer but return an error if it's the wrong alignment -pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { +pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) { const addr = @ptrToInt(ptr); if (addr % alignment != 0) { return error.UnalignedMemory; @@ -780,7 +780,7 @@ pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alig return @alignCast(alignment, ptr); } -pub fn isPowerOfTwo(v: var) bool { +pub fn isPowerOfTwo(v: anytype) bool { assert(v != 0); return (v & (v - 1)) == 0; } @@ -897,7 +897,7 @@ test "std.math.log2_int_ceil" { testing.expect(log2_int_ceil(u32, 10) == 4); } -pub fn lossyCast(comptime T: type, value: var) T { +pub fn lossyCast(comptime T: type, value: anytype) T { switch (@typeInfo(@TypeOf(value))) { .Int => return @intToFloat(T, value), .Float => return @floatCast(T, value), @@ -1031,7 +1031,7 @@ pub const Order = enum { }; /// Given two numbers, this function returns the order they are with respect to each other. -pub fn order(a: var, b: var) Order { +pub fn order(a: anytype, b: anytype) Order { if (a == b) { return .eq; } else if (a < b) { @@ -1062,7 +1062,7 @@ pub const CompareOperator = enum { /// This function does the same thing as comparison operators, however the /// operator is a runtime-known enum value. Works on any operands that /// support comparison operators. 
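Call sites for the generic std.math helpers above are likewise unaffected; a small sketch mirroring the in-tree tests, with values chosen purely for illustration.

const std = @import("std");
const math = std.math;

test "sketch: generic math helpers keep their call sites" {
    std.testing.expect(math.min(@as(i32, -1), @as(i32, 2)) == -1);
    std.testing.expect(math.max(@as(i32, -1), @as(i32, 2)) == 2);
    std.testing.expect(math.clamp(@as(u8, 200), @as(u8, 0), @as(u8, 100)) == 100);
    std.testing.expect(math.absCast(@as(i32, -42)) == 42);
    std.testing.expect(math.order(@as(u32, 5), @as(u32, 10)) == .lt);
}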
-pub fn compare(a: var, op: CompareOperator, b: var) bool { +pub fn compare(a: anytype, op: CompareOperator, b: anytype) bool { return switch (op) { .lt => a < b, .lte => a <= b, diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig index aec0d4706a..cdd86601fd 100644 --- a/lib/std/math/acos.zig +++ b/lib/std/math/acos.zig @@ -12,7 +12,7 @@ const expect = std.testing.expect; /// /// Special cases: /// - acos(x) = nan if x < -1 or x > 1 -pub fn acos(x: var) @TypeOf(x) { +pub fn acos(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => acos32(x), diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig index 0f99335058..9a594f9cc4 100644 --- a/lib/std/math/acosh.zig +++ b/lib/std/math/acosh.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// Special cases: /// - acosh(x) = snan if x < 1 /// - acosh(nan) = nan -pub fn acosh(x: var) @TypeOf(x) { +pub fn acosh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => acosh32(x), diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig index db57e2088f..4cff69fc1b 100644 --- a/lib/std/math/asin.zig +++ b/lib/std/math/asin.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - asin(+-0) = +-0 /// - asin(x) = nan if x < -1 or x > 1 -pub fn asin(x: var) @TypeOf(x) { +pub fn asin(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => asin32(x), diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig index ab1b650139..940b953d06 100644 --- a/lib/std/math/asinh.zig +++ b/lib/std/math/asinh.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - asinh(+-0) = +-0 /// - asinh(+-inf) = +-inf /// - asinh(nan) = nan -pub fn asinh(x: var) @TypeOf(x) { +pub fn asinh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => asinh32(x), diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig index eb9154b5fe..9342b6ed59 100644 --- a/lib/std/math/atan.zig +++ b/lib/std/math/atan.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - atan(+-0) = +-0 /// - atan(+-inf) = +-pi/2 -pub fn atan(x: var) @TypeOf(x) { +pub fn atan(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => atan32(x), diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig index e58a10fff5..de742bd4cd 100644 --- a/lib/std/math/atanh.zig +++ b/lib/std/math/atanh.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - atanh(+-1) = +-inf with signal /// - atanh(x) = nan if |x| > 1 with signal /// - atanh(nan) = nan -pub fn atanh(x: var) @TypeOf(x) { +pub fn atanh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => atanh_32(x), diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 85e14bc55c..b6d7731f1a 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -12,7 +12,7 @@ const assert = std.debug.assert; /// Returns the number of limbs needed to store `scalar`, which must be a /// primitive integer value. -pub fn calcLimbLen(scalar: var) usize { +pub fn calcLimbLen(scalar: anytype) usize { const T = @TypeOf(scalar); switch (@typeInfo(T)) { .Int => |info| { @@ -110,7 +110,7 @@ pub const Mutable = struct { /// `value` is a primitive integer type. /// Asserts the value fits within the provided `limbs_buffer`. /// Note: `calcLimbLen` can be used to figure out how big an array to allocate for `limbs_buffer`. 
- pub fn init(limbs_buffer: []Limb, value: var) Mutable { + pub fn init(limbs_buffer: []Limb, value: anytype) Mutable { limbs_buffer[0] = 0; var self: Mutable = .{ .limbs = limbs_buffer, @@ -169,7 +169,7 @@ pub const Mutable = struct { /// Asserts the value fits within the limbs buffer. /// Note: `calcLimbLen` can be used to figure out how big the limbs buffer /// needs to be to store a specific value. - pub fn set(self: *Mutable, value: var) void { + pub fn set(self: *Mutable, value: anytype) void { const T = @TypeOf(value); switch (@typeInfo(T)) { @@ -281,7 +281,7 @@ pub const Mutable = struct { /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. - pub fn addScalar(r: *Mutable, a: Const, scalar: var) void { + pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { var limbs: [calcLimbLen(scalar)]Limb = undefined; const operand = init(&limbs, scalar).toConst(); return add(r, a, operand); @@ -1058,7 +1058,7 @@ pub const Const = struct { self: Const, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { comptime var radix = 10; comptime var uppercase = false; @@ -1261,7 +1261,7 @@ pub const Const = struct { } /// Same as `order` but the right-hand operand is a primitive integer. - pub fn orderAgainstScalar(lhs: Const, scalar: var) math.Order { + pub fn orderAgainstScalar(lhs: Const, scalar: anytype) math.Order { var limbs: [calcLimbLen(scalar)]Limb = undefined; const rhs = Mutable.init(&limbs, scalar); return order(lhs, rhs.toConst()); @@ -1333,7 +1333,7 @@ pub const Managed = struct { /// Creates a new `Managed` with value `value`. /// /// This is identical to an `init`, followed by a `set`. - pub fn initSet(allocator: *Allocator, value: var) !Managed { + pub fn initSet(allocator: *Allocator, value: anytype) !Managed { var s = try Managed.init(allocator); try s.set(value); return s; @@ -1496,7 +1496,7 @@ pub const Managed = struct { } /// Sets an Managed to value. Value must be an primitive integer type. - pub fn set(self: *Managed, value: var) Allocator.Error!void { + pub fn set(self: *Managed, value: anytype) Allocator.Error!void { try self.ensureCapacity(calcLimbLen(value)); var m = self.toMutable(); m.set(value); @@ -1549,7 +1549,7 @@ pub const Managed = struct { self: Managed, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { return self.toConst().format(fmt, options, out_stream); } @@ -1607,7 +1607,7 @@ pub const Managed = struct { /// scalar is a primitive integer type. /// /// Returns an error if memory could not be allocated. - pub fn addScalar(r: *Managed, a: Const, scalar: var) Allocator.Error!void { + pub fn addScalar(r: *Managed, a: Const, scalar: anytype) Allocator.Error!void { try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); var m = r.toMutable(); m.addScalar(a, scalar); diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 3624a16139..6f62a462b8 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -43,7 +43,7 @@ pub const Rational = struct { } /// Set a Rational from a primitive integer type. - pub fn setInt(self: *Rational, a: var) !void { + pub fn setInt(self: *Rational, a: anytype) !void { try self.p.set(a); try self.q.set(1); } @@ -280,7 +280,7 @@ pub const Rational = struct { } /// Set a rational from an integer ratio. 
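A sketch of the scalar-accepting big-integer API shown above (`initSet`, `set`, `orderAgainstScalar`), assuming `std.testing.allocator`; the constants are arbitrary.

const std = @import("std");

test "sketch: big.int.Managed accepts primitive integers" {
    var a = try std.math.big.int.Managed.initSet(std.testing.allocator, @as(u64, 1234567890123456789));
    defer a.deinit();
    // set/initSet/addScalar all take the primitive value as anytype.
    try a.set(@as(u32, 42));
    std.testing.expect(a.toConst().orderAgainstScalar(@as(u32, 42)) == .eq);
}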
- pub fn setRatio(self: *Rational, p: var, q: var) !void { + pub fn setRatio(self: *Rational, p: anytype, q: anytype) !void { try self.p.set(p); try self.q.set(q); diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig index 2b219d5368..42163b96dc 100644 --- a/lib/std/math/cbrt.zig +++ b/lib/std/math/cbrt.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - cbrt(+-0) = +-0 /// - cbrt(+-inf) = +-inf /// - cbrt(nan) = nan -pub fn cbrt(x: var) @TypeOf(x) { +pub fn cbrt(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cbrt32(x), diff --git a/lib/std/math/ceil.zig b/lib/std/math/ceil.zig index e3b5679318..39de46f361 100644 --- a/lib/std/math/ceil.zig +++ b/lib/std/math/ceil.zig @@ -15,7 +15,7 @@ const expect = std.testing.expect; /// - ceil(+-0) = +-0 /// - ceil(+-inf) = +-inf /// - ceil(nan) = nan -pub fn ceil(x: var) @TypeOf(x) { +pub fn ceil(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => ceil32(x), diff --git a/lib/std/math/complex/abs.zig b/lib/std/math/complex/abs.zig index 75b967f3d2..db31aef42a 100644 --- a/lib/std/math/complex/abs.zig +++ b/lib/std/math/complex/abs.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the absolute value (modulus) of z. -pub fn abs(z: var) @TypeOf(z.re) { +pub fn abs(z: anytype) @TypeOf(z.re) { const T = @TypeOf(z.re); return math.hypot(T, z.re, z.im); } diff --git a/lib/std/math/complex/acos.zig b/lib/std/math/complex/acos.zig index 24a645375c..072fd77f08 100644 --- a/lib/std/math/complex/acos.zig +++ b/lib/std/math/complex/acos.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the arc-cosine of z. -pub fn acos(z: var) Complex(@TypeOf(z.re)) { +pub fn acos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.asin(z); return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im); diff --git a/lib/std/math/complex/acosh.zig b/lib/std/math/complex/acosh.zig index 996334034a..59117a8b27 100644 --- a/lib/std/math/complex/acosh.zig +++ b/lib/std/math/complex/acosh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-cosine of z. -pub fn acosh(z: var) Complex(@TypeOf(z.re)) { +pub fn acosh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = cmath.acos(z); return Complex(T).new(-q.im, q.re); diff --git a/lib/std/math/complex/arg.zig b/lib/std/math/complex/arg.zig index f690e92143..6cf959a081 100644 --- a/lib/std/math/complex/arg.zig +++ b/lib/std/math/complex/arg.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the angular component (in radians) of z. -pub fn arg(z: var) @TypeOf(z.re) { +pub fn arg(z: anytype) @TypeOf(z.re) { const T = @TypeOf(z.re); return math.atan2(T, z.im, z.re); } diff --git a/lib/std/math/complex/asin.zig b/lib/std/math/complex/asin.zig index 01fa33156a..9f7cd396aa 100644 --- a/lib/std/math/complex/asin.zig +++ b/lib/std/math/complex/asin.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; // Returns the arc-sine of z. 
-pub fn asin(z: var) Complex(@TypeOf(z.re)) { +pub fn asin(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const x = z.re; const y = z.im; diff --git a/lib/std/math/complex/asinh.zig b/lib/std/math/complex/asinh.zig index 47d8244adb..0c3c2bd115 100644 --- a/lib/std/math/complex/asinh.zig +++ b/lib/std/math/complex/asinh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-sine of z. -pub fn asinh(z: var) Complex(@TypeOf(z.re)) { +pub fn asinh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.asin(q); diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig index 5ba6f7b0d2..98bde3e125 100644 --- a/lib/std/math/complex/atan.zig +++ b/lib/std/math/complex/atan.zig @@ -12,7 +12,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the arc-tangent of z. -pub fn atan(z: var) @TypeOf(z) { +pub fn atan(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => atan32(z), diff --git a/lib/std/math/complex/atanh.zig b/lib/std/math/complex/atanh.zig index 8b70306224..a07c2969e4 100644 --- a/lib/std/math/complex/atanh.zig +++ b/lib/std/math/complex/atanh.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic arc-tangent of z. -pub fn atanh(z: var) Complex(@TypeOf(z.re)) { +pub fn atanh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.atan(q); diff --git a/lib/std/math/complex/conj.zig b/lib/std/math/complex/conj.zig index 1065d4bb73..42a34e7dfc 100644 --- a/lib/std/math/complex/conj.zig +++ b/lib/std/math/complex/conj.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the complex conjugate of z. -pub fn conj(z: var) Complex(@TypeOf(z.re)) { +pub fn conj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); return Complex(T).new(z.re, -z.im); } diff --git a/lib/std/math/complex/cos.zig b/lib/std/math/complex/cos.zig index 1aefa73db5..9daf89c730 100644 --- a/lib/std/math/complex/cos.zig +++ b/lib/std/math/complex/cos.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the cosine of z. -pub fn cos(z: var) Complex(@TypeOf(z.re)) { +pub fn cos(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const p = Complex(T).new(-z.im, z.re); return cmath.cosh(p); diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig index a9ac893602..bd51629bd4 100644 --- a/lib/std/math/complex/cosh.zig +++ b/lib/std/math/complex/cosh.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns the hyperbolic arc-cosine of z. -pub fn cosh(z: var) Complex(@TypeOf(z.re)) { +pub fn cosh(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); return switch (T) { f32 => cosh32(z), diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig index 9f9e3db807..6f6061a947 100644 --- a/lib/std/math/complex/exp.zig +++ b/lib/std/math/complex/exp.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns e raised to the power of z (e^z). 
-pub fn exp(z: var) @TypeOf(z) { +pub fn exp(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig index 9eccd4bb98..c23b9b346e 100644 --- a/lib/std/math/complex/ldexp.zig +++ b/lib/std/math/complex/ldexp.zig @@ -11,7 +11,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns exp(z) scaled to avoid overflow. -pub fn ldexp_cexp(z: var, expt: i32) @TypeOf(z) { +pub fn ldexp_cexp(z: anytype, expt: i32) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { diff --git a/lib/std/math/complex/log.zig b/lib/std/math/complex/log.zig index f1fad3175e..ec02c6c325 100644 --- a/lib/std/math/complex/log.zig +++ b/lib/std/math/complex/log.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the natural logarithm of z. -pub fn log(z: var) Complex(@TypeOf(z.re)) { +pub fn log(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const r = cmath.abs(z); const phi = cmath.arg(z); diff --git a/lib/std/math/complex/proj.zig b/lib/std/math/complex/proj.zig index 349f6b3abb..e208ae0370 100644 --- a/lib/std/math/complex/proj.zig +++ b/lib/std/math/complex/proj.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the projection of z onto the riemann sphere. -pub fn proj(z: var) Complex(@TypeOf(z.re)) { +pub fn proj(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); if (math.isInf(z.re) or math.isInf(z.im)) { diff --git a/lib/std/math/complex/sin.zig b/lib/std/math/complex/sin.zig index 87dc57911b..1b10f8fca6 100644 --- a/lib/std/math/complex/sin.zig +++ b/lib/std/math/complex/sin.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the sine of z. -pub fn sin(z: var) Complex(@TypeOf(z.re)) { +pub fn sin(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const p = Complex(T).new(-z.im, z.re); const q = cmath.sinh(p); diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig index 7dd880c71c..32f2a730fb 100644 --- a/lib/std/math/complex/sinh.zig +++ b/lib/std/math/complex/sinh.zig @@ -14,7 +14,7 @@ const Complex = cmath.Complex; const ldexp_cexp = @import("ldexp.zig").ldexp_cexp; /// Returns the hyperbolic sine of z. -pub fn sinh(z: var) @TypeOf(z) { +pub fn sinh(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => sinh32(z), diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig index 57e73f6cd1..0edb02a7a9 100644 --- a/lib/std/math/complex/sqrt.zig +++ b/lib/std/math/complex/sqrt.zig @@ -12,7 +12,7 @@ const Complex = cmath.Complex; /// Returns the square root of z. The real and imaginary parts of the result have the same sign /// as the imaginary part of z. -pub fn sqrt(z: var) @TypeOf(z) { +pub fn sqrt(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { diff --git a/lib/std/math/complex/tan.zig b/lib/std/math/complex/tan.zig index 70304803db..050898c573 100644 --- a/lib/std/math/complex/tan.zig +++ b/lib/std/math/complex/tan.zig @@ -5,7 +5,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the tanget of z. 
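A sketch of the complex helpers above, which now take `z` as `anytype` and derive the float type from `z.re`; the values are illustrative.

const std = @import("std");
const cmath = std.math.complex;

test "sketch: complex helpers derive T from z.re" {
    const z = cmath.Complex(f64).new(1.0, 2.0);
    // abs returns the modulus via math.hypot; conj flips the sign of the imaginary part.
    std.testing.expect(cmath.abs(z) == std.math.hypot(f64, 1.0, 2.0));
    const c = cmath.conj(z);
    std.testing.expect(c.re == 1.0 and c.im == -2.0);
}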
-pub fn tan(z: var) Complex(@TypeOf(z.re)) { +pub fn tan(z: anytype) Complex(@TypeOf(z.re)) { const T = @TypeOf(z.re); const q = Complex(T).new(-z.im, z.re); const r = cmath.tanh(q); diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig index afd2e6aee4..1d614cca58 100644 --- a/lib/std/math/complex/tanh.zig +++ b/lib/std/math/complex/tanh.zig @@ -12,7 +12,7 @@ const cmath = math.complex; const Complex = cmath.Complex; /// Returns the hyperbolic tangent of z. -pub fn tanh(z: var) @TypeOf(z) { +pub fn tanh(z: anytype) @TypeOf(z) { const T = @TypeOf(z.re); return switch (T) { f32 => tanh32(z), diff --git a/lib/std/math/cos.zig b/lib/std/math/cos.zig index aa336769b1..df5c0a53be 100644 --- a/lib/std/math/cos.zig +++ b/lib/std/math/cos.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - cos(+-inf) = nan /// - cos(nan) = nan -pub fn cos(x: var) @TypeOf(x) { +pub fn cos(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cos_(f32, x), diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig index 1cd8c5f27f..bab47dcdbd 100644 --- a/lib/std/math/cosh.zig +++ b/lib/std/math/cosh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - cosh(+-0) = 1 /// - cosh(+-inf) = +inf /// - cosh(nan) = nan -pub fn cosh(x: var) @TypeOf(x) { +pub fn cosh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => cosh32(x), diff --git a/lib/std/math/exp.zig b/lib/std/math/exp.zig index da80b201c0..c84d929adf 100644 --- a/lib/std/math/exp.zig +++ b/lib/std/math/exp.zig @@ -14,7 +14,7 @@ const builtin = @import("builtin"); /// Special Cases: /// - exp(+inf) = +inf /// - exp(nan) = nan -pub fn exp(x: var) @TypeOf(x) { +pub fn exp(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => exp32(x), diff --git a/lib/std/math/exp2.zig b/lib/std/math/exp2.zig index 411f789187..da391189b2 100644 --- a/lib/std/math/exp2.zig +++ b/lib/std/math/exp2.zig @@ -13,7 +13,7 @@ const expect = std.testing.expect; /// Special Cases: /// - exp2(+inf) = +inf /// - exp2(nan) = nan -pub fn exp2(x: var) @TypeOf(x) { +pub fn exp2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => exp2_32(x), diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig index 91752e9f80..80cdefae20 100644 --- a/lib/std/math/expm1.zig +++ b/lib/std/math/expm1.zig @@ -18,7 +18,7 @@ const expect = std.testing.expect; /// - expm1(+inf) = +inf /// - expm1(-inf) = -1 /// - expm1(nan) = nan -pub fn expm1(x: var) @TypeOf(x) { +pub fn expm1(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => expm1_32(x), diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig index e70e365f26..f404570fb6 100644 --- a/lib/std/math/expo2.zig +++ b/lib/std/math/expo2.zig @@ -7,7 +7,7 @@ const math = @import("../math.zig"); /// Returns exp(x) / 2 for x >= log(maxFloat(T)). 
-pub fn expo2(x: var) @TypeOf(x) { +pub fn expo2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => expo2f(x), diff --git a/lib/std/math/fabs.zig b/lib/std/math/fabs.zig index a659e35ca2..ca91f594fd 100644 --- a/lib/std/math/fabs.zig +++ b/lib/std/math/fabs.zig @@ -14,7 +14,7 @@ const maxInt = std.math.maxInt; /// Special Cases: /// - fabs(+-inf) = +inf /// - fabs(nan) = nan -pub fn fabs(x: var) @TypeOf(x) { +pub fn fabs(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f16 => fabs16(x), diff --git a/lib/std/math/floor.zig b/lib/std/math/floor.zig index 565e2911a9..3a71cc7cdf 100644 --- a/lib/std/math/floor.zig +++ b/lib/std/math/floor.zig @@ -15,7 +15,7 @@ const math = std.math; /// - floor(+-0) = +-0 /// - floor(+-inf) = +-inf /// - floor(nan) = nan -pub fn floor(x: var) @TypeOf(x) { +pub fn floor(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f16 => floor16(x), diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig index cfdf9f838d..0e4558dc37 100644 --- a/lib/std/math/frexp.zig +++ b/lib/std/math/frexp.zig @@ -24,7 +24,7 @@ pub const frexp64_result = frexp_result(f64); /// - frexp(+-0) = +-0, 0 /// - frexp(+-inf) = +-inf, 0 /// - frexp(nan) = nan, undefined -pub fn frexp(x: var) frexp_result(@TypeOf(x)) { +pub fn frexp(x: anytype) frexp_result(@TypeOf(x)) { const T = @TypeOf(x); return switch (T) { f32 => frexp32(x), diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig index 22e3fbaa97..748cf9ea0d 100644 --- a/lib/std/math/ilogb.zig +++ b/lib/std/math/ilogb.zig @@ -16,7 +16,7 @@ const minInt = std.math.minInt; /// - ilogb(+-inf) = maxInt(i32) /// - ilogb(0) = maxInt(i32) /// - ilogb(nan) = maxInt(i32) -pub fn ilogb(x: var) i32 { +pub fn ilogb(x: anytype) i32 { const T = @TypeOf(x); return switch (T) { f32 => ilogb32(x), diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig index 26b3ce54a1..0681eae0b7 100644 --- a/lib/std/math/isfinite.zig +++ b/lib/std/math/isfinite.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is a finite value. -pub fn isFinite(x: var) bool { +pub fn isFinite(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig index 6eacab52ad..19357d89d1 100644 --- a/lib/std/math/isinf.zig +++ b/lib/std/math/isinf.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is an infinity, ignoring sign. -pub fn isInf(x: var) bool { +pub fn isInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { @@ -30,7 +30,7 @@ pub fn isInf(x: var) bool { } /// Returns whether x is an infinity with a positive sign. -pub fn isPositiveInf(x: var) bool { +pub fn isPositiveInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { @@ -52,7 +52,7 @@ pub fn isPositiveInf(x: var) bool { } /// Returns whether x is an infinity with a negative sign. -pub fn isNegativeInf(x: var) bool { +pub fn isNegativeInf(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/isnan.zig b/lib/std/math/isnan.zig index ac865f0d0c..797c115d1d 100644 --- a/lib/std/math/isnan.zig +++ b/lib/std/math/isnan.zig @@ -4,12 +4,12 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; /// Returns whether x is a nan. -pub fn isNan(x: var) bool { +pub fn isNan(x: anytype) bool { return x != x; } /// Returns whether x is a signalling nan. 
-pub fn isSignalNan(x: var) bool { +pub fn isSignalNan(x: anytype) bool { // Note: A signalling nan is identical to a standard nan right now but may have a different bit // representation in the future when required. return isNan(x); diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig index 917b4ebfcf..a3144f2784 100644 --- a/lib/std/math/isnormal.zig +++ b/lib/std/math/isnormal.zig @@ -4,7 +4,7 @@ const expect = std.testing.expect; const maxInt = std.math.maxInt; // Returns whether x has a normalized representation (i.e. integer part of mantissa is 1). -pub fn isNormal(x: var) bool { +pub fn isNormal(x: anytype) bool { const T = @TypeOf(x); switch (T) { f16 => { diff --git a/lib/std/math/ln.zig b/lib/std/math/ln.zig index 555a786907..99e54c4cc7 100644 --- a/lib/std/math/ln.zig +++ b/lib/std/math/ln.zig @@ -15,7 +15,7 @@ const expect = std.testing.expect; /// - ln(0) = -inf /// - ln(x) = nan if x < 0 /// - ln(nan) = nan -pub fn ln(x: var) @TypeOf(x) { +pub fn ln(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig index 7367af28c6..e55bd8c1e8 100644 --- a/lib/std/math/log10.zig +++ b/lib/std/math/log10.zig @@ -16,7 +16,7 @@ const maxInt = std.math.maxInt; /// - log10(0) = -inf /// - log10(x) = nan if x < 0 /// - log10(nan) = nan -pub fn log10(x: var) @TypeOf(x) { +pub fn log10(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig index 5e92cfdea3..e24ba8d84d 100644 --- a/lib/std/math/log1p.zig +++ b/lib/std/math/log1p.zig @@ -17,7 +17,7 @@ const expect = std.testing.expect; /// - log1p(-1) = -inf /// - log1p(x) = nan if x < -1 /// - log1p(nan) = nan -pub fn log1p(x: var) @TypeOf(x) { +pub fn log1p(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => log1p_32(x), diff --git a/lib/std/math/log2.zig b/lib/std/math/log2.zig index 54f8bc2baa..95d06a2b60 100644 --- a/lib/std/math/log2.zig +++ b/lib/std/math/log2.zig @@ -16,7 +16,7 @@ const maxInt = std.math.maxInt; /// - log2(0) = -inf /// - log2(x) = nan if x < 0 /// - log2(nan) = nan -pub fn log2(x: var) @TypeOf(x) { +pub fn log2(x: anytype) @TypeOf(x) { const T = @TypeOf(x); switch (@typeInfo(T)) { .ComptimeFloat => { diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig index 6fd89e3dda..5ab5318a79 100644 --- a/lib/std/math/modf.zig +++ b/lib/std/math/modf.zig @@ -24,7 +24,7 @@ pub const modf64_result = modf_result(f64); /// Special Cases: /// - modf(+-inf) = +-inf, nan /// - modf(nan) = nan, nan -pub fn modf(x: var) modf_result(@TypeOf(x)) { +pub fn modf(x: anytype) modf_result(@TypeOf(x)) { const T = @TypeOf(x); return switch (T) { f32 => modf32(x), diff --git a/lib/std/math/round.zig b/lib/std/math/round.zig index 052c0f7670..854adee4ba 100644 --- a/lib/std/math/round.zig +++ b/lib/std/math/round.zig @@ -15,7 +15,7 @@ const math = std.math; /// - round(+-0) = +-0 /// - round(+-inf) = +-inf /// - round(nan) = nan -pub fn round(x: var) @TypeOf(x) { +pub fn round(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => round32(x), diff --git a/lib/std/math/scalbn.zig b/lib/std/math/scalbn.zig index bab109f334..71a8110ce7 100644 --- a/lib/std/math/scalbn.zig +++ b/lib/std/math/scalbn.zig @@ -9,7 +9,7 @@ const math = std.math; const expect = std.testing.expect; /// Returns x * 2^n. 
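The single-argument float helpers above also keep their call sites; an illustrative check, with constants chosen arbitrarily.

const std = @import("std");
const math = std.math;

test "sketch: single-argument float helpers" {
    std.testing.expect(math.fabs(@as(f32, -1.5)) == 1.5);
    std.testing.expect(math.floor(@as(f64, 2.9)) == 2.0);
    std.testing.expect(math.round(@as(f32, 2.5)) == 3.0);
    std.testing.expect(math.isNan(math.nan(f64)));
    std.testing.expect(math.isInf(math.inf(f64)));
}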
-pub fn scalbn(x: var, n: i32) @TypeOf(x) { +pub fn scalbn(x: anytype, n: i32) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => scalbn32(x, n), diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig index 9cb62b5042..49397f7bd4 100644 --- a/lib/std/math/signbit.zig +++ b/lib/std/math/signbit.zig @@ -3,7 +3,7 @@ const math = std.math; const expect = std.testing.expect; /// Returns whether x is negative or negative 0. -pub fn signbit(x: var) bool { +pub fn signbit(x: anytype) bool { const T = @TypeOf(x); return switch (T) { f16 => signbit16(x), diff --git a/lib/std/math/sin.zig b/lib/std/math/sin.zig index e88f5eeeaf..df3b294ca6 100644 --- a/lib/std/math/sin.zig +++ b/lib/std/math/sin.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - sin(+-0) = +-0 /// - sin(+-inf) = nan /// - sin(nan) = nan -pub fn sin(x: var) @TypeOf(x) { +pub fn sin(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => sin_(T, x), diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig index 0e2cb5a3d5..26e0e05f38 100644 --- a/lib/std/math/sinh.zig +++ b/lib/std/math/sinh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - sinh(+-0) = +-0 /// - sinh(+-inf) = +-inf /// - sinh(nan) = nan -pub fn sinh(x: var) @TypeOf(x) { +pub fn sinh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => sinh32(x), diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 097b0152f7..2f0d251432 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -13,7 +13,7 @@ const maxInt = std.math.maxInt; /// - sqrt(x) = nan if x < 0 /// - sqrt(nan) = nan /// TODO Decide if all this logic should be implemented directly in the @sqrt bultin function. -pub fn sqrt(x: var) Sqrt(@TypeOf(x)) { +pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) { const T = @TypeOf(x); switch (@typeInfo(T)) { .Float, .ComptimeFloat => return @sqrt(x), diff --git a/lib/std/math/tan.zig b/lib/std/math/tan.zig index 86f473f448..2cd5a407df 100644 --- a/lib/std/math/tan.zig +++ b/lib/std/math/tan.zig @@ -14,7 +14,7 @@ const expect = std.testing.expect; /// - tan(+-0) = +-0 /// - tan(+-inf) = nan /// - tan(nan) = nan -pub fn tan(x: var) @TypeOf(x) { +pub fn tan(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => tan_(f32, x), diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig index 1cad399729..7697db5271 100644 --- a/lib/std/math/tanh.zig +++ b/lib/std/math/tanh.zig @@ -17,7 +17,7 @@ const maxInt = std.math.maxInt; /// - sinh(+-0) = +-0 /// - sinh(+-inf) = +-1 /// - sinh(nan) = nan -pub fn tanh(x: var) @TypeOf(x) { +pub fn tanh(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => tanh32(x), diff --git a/lib/std/math/trunc.zig b/lib/std/math/trunc.zig index cdd2fa3c6b..df24b77111 100644 --- a/lib/std/math/trunc.zig +++ b/lib/std/math/trunc.zig @@ -15,7 +15,7 @@ const maxInt = std.math.maxInt; /// - trunc(+-0) = +-0 /// - trunc(+-inf) = +-inf /// - trunc(nan) = nan -pub fn trunc(x: var) @TypeOf(x) { +pub fn trunc(x: anytype) @TypeOf(x) { const T = @TypeOf(x); return switch (T) { f32 => trunc32(x), diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 08ecc5167f..82831787a7 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -122,7 +122,7 @@ pub const Allocator = struct { assert(resized_len >= new_byte_count); @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); return old_mem.ptr[0..resized_len]; - } else |_| { } + } else |_| {} } if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) { return 
error.OutOfMemory; @@ -156,7 +156,7 @@ pub const Allocator = struct { /// `ptr` should be the return value of `create`, or otherwise /// have the same address and alignment property. - pub fn destroy(self: *Allocator, ptr: var) void { + pub fn destroy(self: *Allocator, ptr: anytype) void { const T = @TypeOf(ptr).Child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); @@ -225,7 +225,7 @@ pub const Allocator = struct { return self.allocAdvanced(T, alignment, n, .exact); } - const Exact = enum {exact,at_least}; + const Exact = enum { exact, at_least }; pub fn allocAdvanced( self: *Allocator, comptime T: type, @@ -272,7 +272,7 @@ pub const Allocator = struct { /// in `std.ArrayList.shrink`. /// If you need guaranteed success, call `shrink`. /// If `new_n` is 0, this is the same as `free` and it always succeeds. - pub fn realloc(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -280,7 +280,7 @@ pub const Allocator = struct { return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact); } - pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -291,7 +291,7 @@ pub const Allocator = struct { // Deprecated: use `reallocAdvanced` pub fn alignedRealloc( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { @@ -303,7 +303,7 @@ pub const Allocator = struct { /// allocation. pub fn reallocAdvanced( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, exact: Exact, @@ -321,8 +321,7 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure - const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, - if (exact == .exact) @as(u29, 0) else @sizeOf(T)); + const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T)); return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice)); } @@ -331,7 +330,7 @@ pub const Allocator = struct { /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`. /// Returned slice has same alignment as old_mem. /// Shrinking to 0 is the same as calling `free`. - pub fn shrink(self: *Allocator, old_mem: var, new_n: usize) t: { + pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t []align(Slice.alignment) Slice.child; } { @@ -344,7 +343,7 @@ pub const Allocator = struct { /// allocation. pub fn alignedShrink( self: *Allocator, - old_mem: var, + old_mem: anytype, comptime new_alignment: u29, new_n: usize, ) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child { @@ -368,7 +367,7 @@ pub const Allocator = struct { /// Free an array allocated with `alloc`. To free a single item, /// see `destroy`. 
- pub fn free(self: *Allocator, memory: var) void { + pub fn free(self: *Allocator, memory: anytype) void { const Slice = @typeInfo(@TypeOf(memory)).Pointer; const bytes = mem.sliceAsBytes(memory); const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0; @@ -396,67 +395,69 @@ pub const Allocator = struct { /// Detects and asserts if the std.mem.Allocator interface is violated by the caller /// or the allocator. -pub fn ValidationAllocator(comptime T: type) type { return struct { - const Self = @This(); - allocator: Allocator, - underlying_allocator: T, - pub fn init(allocator: T) @This() { - return .{ - .allocator = .{ - .allocFn = alloc, - .resizeFn = resize, - }, - .underlying_allocator = allocator, - }; - } - fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { - if (T == *Allocator) return self.underlying_allocator; - if (*T == *Allocator) return &self.underlying_allocator; - return &self.underlying_allocator.allocator; - } - pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { - assert(n > 0); - assert(mem.isValidAlign(ptr_align)); - if (len_align != 0) { - assert(mem.isAlignedAnyAlign(n, len_align)); - assert(n >= len_align); - } - - const self = @fieldParentPtr(@This(), "allocator", allocator); - const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); - assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); - if (len_align == 0) { - assert(result.len == n); - } else { - assert(result.len >= n); - assert(mem.isAlignedAnyAlign(result.len, len_align)); +pub fn ValidationAllocator(comptime T: type) type { + return struct { + const Self = @This(); + allocator: Allocator, + underlying_allocator: T, + pub fn init(allocator: T) @This() { + return .{ + .allocator = .{ + .allocFn = alloc, + .resizeFn = resize, + }, + .underlying_allocator = allocator, + }; } - return result; - } - pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize { - assert(buf.len > 0); - if (len_align != 0) { - assert(mem.isAlignedAnyAlign(new_len, len_align)); - assert(new_len >= len_align); + fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { + if (T == *Allocator) return self.underlying_allocator; + if (*T == *Allocator) return &self.underlying_allocator; + return &self.underlying_allocator.allocator; } - const self = @fieldParentPtr(@This(), "allocator", allocator); - const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align); - if (len_align == 0) { - assert(result == new_len); - } else { - assert(result >= new_len); - assert(mem.isAlignedAnyAlign(result, len_align)); + pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 { + assert(n > 0); + assert(mem.isValidAlign(ptr_align)); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(n, len_align)); + assert(n >= len_align); + } + + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align); + assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); + if (len_align == 0) { + assert(result.len == n); + } else { + assert(result.len >= n); + assert(mem.isAlignedAnyAlign(result.len, len_align)); + } + return result; } - return result; - } - pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { - pub fn reset(self: *Self) void { - self.underlying_allocator.reset(); + pub fn resize(allocator: 
*Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize { + assert(buf.len > 0); + if (len_align != 0) { + assert(mem.isAlignedAnyAlign(new_len, len_align)); + assert(new_len >= len_align); + } + const self = @fieldParentPtr(@This(), "allocator", allocator); + const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align); + if (len_align == 0) { + assert(result == new_len); + } else { + assert(result >= new_len); + assert(mem.isAlignedAnyAlign(result, len_align)); + } + return result; } + pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { + pub fn reset(self: *Self) void { + self.underlying_allocator.reset(); + } + }; }; -};} +} -pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) { +pub fn validationWrap(allocator: anytype) ValidationAllocator(@TypeOf(allocator)) { return ValidationAllocator(@TypeOf(allocator)).init(allocator); } @@ -465,14 +466,14 @@ pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) { /// than the `len` that was requsted. This function should only be used by allocators /// that are unaffected by `len_align`. pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { - assert(alloc_len > 0); - assert(alloc_len >= len_align); - assert(full_len >= alloc_len); - if (len_align == 0) - return alloc_len; - const adjusted = alignBackwardAnyAlign(full_len, len_align); - assert(adjusted >= alloc_len); - return adjusted; + assert(alloc_len > 0); + assert(alloc_len >= len_align); + assert(full_len >= alloc_len); + if (len_align == 0) + return alloc_len; + const adjusted = alignBackwardAnyAlign(full_len, len_align); + assert(adjusted >= alloc_len); + return adjusted; } var failAllocator = Allocator{ @@ -695,7 +696,7 @@ test "mem.secureZero" { /// Initializes all fields of the struct with their default value, or zero values if no default value is present. /// If the field is present in the provided initial values, it will have that value instead. /// Structs are initialized recursively. -pub fn zeroInit(comptime T: type, init: var) T { +pub fn zeroInit(comptime T: type, init: anytype) T { comptime const Init = @TypeOf(init); switch (@typeInfo(T)) { @@ -895,7 +896,7 @@ test "Span" { /// /// When there is both a sentinel and an array length or slice length, the /// length value is used instead of the sentinel. -pub fn span(ptr: var) Span(@TypeOf(ptr)) { +pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return span(non_null); @@ -923,7 +924,7 @@ test "span" { /// Same as `span`, except when there is both a sentinel and an array /// length or slice length, scans the memory for the sentinel value /// rather than using the length. -pub fn spanZ(ptr: var) Span(@TypeOf(ptr)) { +pub fn spanZ(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return spanZ(non_null); @@ -952,7 +953,7 @@ test "spanZ" { /// or a slice, and returns the length. /// In the case of a sentinel-terminated array, it uses the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel. 
-pub fn len(value: var) usize { +pub fn len(value: anytype) usize { return switch (@typeInfo(@TypeOf(value))) { .Array => |info| info.len, .Vector => |info| info.len, @@ -1000,7 +1001,7 @@ test "len" { /// In the case of a sentinel-terminated array, it scans the array /// for a sentinel and uses that for the length, rather than using the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel. -pub fn lenZ(ptr: var) usize { +pub fn lenZ(ptr: anytype) usize { return switch (@typeInfo(@TypeOf(ptr))) { .Array => |info| if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, &ptr) @@ -2031,7 +2032,7 @@ fn AsBytesReturnType(comptime P: type) type { } /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving constness. -pub fn asBytes(ptr: var) AsBytesReturnType(@TypeOf(ptr)) { +pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) { const P = @TypeOf(ptr); return @ptrCast(AsBytesReturnType(P), ptr); } @@ -2071,7 +2072,7 @@ test "asBytes" { } ///Given any value, returns a copy of its bytes in an array. -pub fn toBytes(value: var) [@sizeOf(@TypeOf(value))]u8 { +pub fn toBytes(value: anytype) [@sizeOf(@TypeOf(value))]u8 { return asBytes(&value).*; } @@ -2106,7 +2107,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type { ///Given a pointer to an array of bytes, returns a pointer to a value of the specified type /// backed by those bytes, preserving constness. -pub fn bytesAsValue(comptime T: type, bytes: var) BytesAsValueReturnType(T, @TypeOf(bytes)) { +pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) { return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes); } @@ -2149,7 +2150,7 @@ test "bytesAsValue" { ///Given a pointer to an array of bytes, returns a value of the specified type backed by a /// copy of those bytes. 
-pub fn bytesToValue(comptime T: type, bytes: var) T { +pub fn bytesToValue(comptime T: type, bytes: anytype) T { return bytesAsValue(T, bytes).*; } test "bytesToValue" { @@ -2177,7 +2178,7 @@ fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type { return if (trait.isConstPtr(bytesType)) []align(alignment) const T else []align(alignment) T; } -pub fn bytesAsSlice(comptime T: type, bytes: var) BytesAsSliceReturnType(T, @TypeOf(bytes)) { +pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, @TypeOf(bytes)) { // let's not give an undefined pointer to @ptrCast // it may be equal to zero and fail a null check if (bytes.len == 0) { @@ -2256,7 +2257,7 @@ fn SliceAsBytesReturnType(comptime sliceType: type) type { return if (trait.isConstPtr(sliceType)) []align(alignment) const u8 else []align(alignment) u8; } -pub fn sliceAsBytes(slice: var) SliceAsBytesReturnType(@TypeOf(slice)) { +pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) { const Slice = @TypeOf(slice); // let's not give an undefined pointer to @ptrCast diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 6c10941aa7..2827cfecf4 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -9,7 +9,7 @@ pub const trait = @import("meta/trait.zig"); const TypeInfo = builtin.TypeInfo; -pub fn tagName(v: var) []const u8 { +pub fn tagName(v: anytype) []const u8 { const T = @TypeOf(v); switch (@typeInfo(T)) { .ErrorSet => return @errorName(v), @@ -430,7 +430,7 @@ test "std.meta.TagType" { } ///Returns the active tag of a tagged union -pub fn activeTag(u: var) @TagType(@TypeOf(u)) { +pub fn activeTag(u: anytype) @TagType(@TypeOf(u)) { const T = @TypeOf(u); return @as(@TagType(T), u); } @@ -480,7 +480,7 @@ test "std.meta.TagPayloadType" { /// Compares two of any type for equality. Containers are compared on a field-by-field basis, /// where possible. Pointers are not followed. -pub fn eql(a: var, b: @TypeOf(a)) bool { +pub fn eql(a: anytype, b: @TypeOf(a)) bool { const T = @TypeOf(a); switch (@typeInfo(T)) { @@ -627,7 +627,7 @@ test "intToEnum with error return" { pub const IntToEnumError = error{InvalidEnumTag}; -pub fn intToEnum(comptime Tag: type, tag_int: var) IntToEnumError!Tag { +pub fn intToEnum(comptime Tag: type, tag_int: anytype) IntToEnumError!Tag { inline for (@typeInfo(Tag).Enum.fields) |f| { const this_tag_value = @field(Tag, f.name); if (tag_int == @enumToInt(this_tag_value)) { @@ -696,7 +696,7 @@ pub fn Vector(comptime len: u32, comptime child: type) type { /// Given a type and value, cast the value to the type as c would. /// This is for translate-c and is not intended for general use. 
-pub fn cast(comptime DestType: type, target: var) DestType { +pub fn cast(comptime DestType: type, target: anytype) DestType { const TargetType = @TypeOf(target); switch (@typeInfo(DestType)) { .Pointer => { diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig index 11aa8457ee..5cea0ecb9a 100644 --- a/lib/std/meta/trait.zig +++ b/lib/std/meta/trait.zig @@ -9,7 +9,7 @@ const meta = @import("../meta.zig"); pub const TraitFn = fn (type) bool; -pub fn multiTrait(comptime traits: var) TraitFn { +pub fn multiTrait(comptime traits: anytype) TraitFn { const Closure = struct { pub fn trait(comptime T: type) bool { inline for (traits) |t| @@ -342,7 +342,7 @@ test "std.meta.trait.isContainer" { testing.expect(!isContainer(u8)); } -pub fn hasDecls(comptime T: type, comptime names: var) bool { +pub fn hasDecls(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!@hasDecl(T, name)) return false; @@ -368,7 +368,7 @@ test "std.meta.trait.hasDecls" { testing.expect(!hasDecls(TestStruct2, tuple)); } -pub fn hasFields(comptime T: type, comptime names: var) bool { +pub fn hasFields(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!@hasField(T, name)) return false; @@ -394,7 +394,7 @@ test "std.meta.trait.hasFields" { testing.expect(!hasFields(TestStruct2, .{ "a", "b", "useless" })); } -pub fn hasFunctions(comptime T: type, comptime names: var) bool { +pub fn hasFunctions(comptime T: type, comptime names: anytype) bool { inline for (names) |name| { if (!hasFn(name)(T)) return false; diff --git a/lib/std/net.zig b/lib/std/net.zig index a9dcf53f92..71bab383fa 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -427,7 +427,7 @@ pub const Address = extern union { self: Address, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { switch (self.any.family) { os.AF_INET => { @@ -1404,8 +1404,8 @@ fn resMSendRc( fn dnsParse( r: []const u8, - ctx: var, - comptime callback: var, + ctx: anytype, + comptime callback: anytype, ) !void { // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. 
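For illustration only, not part of the patch: a minimal sketch of the var-to-anytype rename that the hunks above apply throughout std.math, std.mem, std.meta and std.net. The function `sum` below is hypothetical and exists only to show the before/after spelling of a generic parameter.

const std = @import("std");

// Before this series a generic parameter was written as:
//     fn sum(a: var, b: var) @TypeOf(a + b) { ... }
// After it, the same declaration uses the `anytype` keyword:
fn sum(a: anytype, b: anytype) @TypeOf(a + b) {
    return a + b;
}

test "anytype parameters infer their type from the call site" {
    std.testing.expect(sum(@as(u32, 1), @as(u32, 2)) == 3);
    std.testing.expect(sum(1.5, 2.5) == 4.0);
}

The call sites themselves do not change; only the parameter keyword in the declarations does, which is why the hunks above are almost entirely one-line signature edits.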
diff --git a/lib/std/os.zig b/lib/std/os.zig index 1e1049ae51..dfb47208ca 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -4068,7 +4068,7 @@ pub fn nanosleep(seconds: u64, nanoseconds: u64) void { } pub fn dl_iterate_phdr( - context: var, + context: anytype, comptime Error: type, comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void, ) Error!void { diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig index 1d2f88794d..c037075cd8 100644 --- a/lib/std/os/uefi.zig +++ b/lib/std/os/uefi.zig @@ -28,7 +28,7 @@ pub const Guid = extern struct { self: @This(), comptime f: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) Errors!void { if (f.len == 0) { return std.fmt.format(out_stream, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{ diff --git a/lib/std/progress.zig b/lib/std/progress.zig index d80f8c4423..b81e81aa2c 100644 --- a/lib/std/progress.zig +++ b/lib/std/progress.zig @@ -224,7 +224,7 @@ pub const Progress = struct { self.prev_refresh_timestamp = self.timer.read(); } - pub fn log(self: *Progress, comptime format: []const u8, args: var) void { + pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { const file = self.terminal orelse return; self.refresh(); file.outStream().print(format, args) catch { @@ -234,7 +234,7 @@ pub const Progress = struct { self.columns_written = 0; } - fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: var) void { + fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { const amt = written.len; end.* += amt; diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig index d087e480f6..d39fe3e239 100644 --- a/lib/std/segmented_list.zig +++ b/lib/std/segmented_list.zig @@ -122,7 +122,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type self.* = undefined; } - pub fn at(self: var, i: usize) AtType(@TypeOf(self)) { + pub fn at(self: anytype, i: usize) AtType(@TypeOf(self)) { assert(i < self.len); return self.uncheckedAt(i); } @@ -241,7 +241,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } } - pub fn uncheckedAt(self: var, index: usize) AtType(@TypeOf(self)) { + pub fn uncheckedAt(self: anytype, index: usize) AtType(@TypeOf(self)) { if (index < prealloc_item_count) { return &self.prealloc_segment[index]; } diff --git a/lib/std/sort.zig b/lib/std/sort.zig index cb6162e9b0..464054e4a5 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -9,7 +9,7 @@ pub fn binarySearch( comptime T: type, key: T, items: []const T, - context: var, + context: anytype, comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order, ) ?usize { var left: usize = 0; @@ -76,7 +76,7 @@ test "binarySearch" { pub fn insertionSort( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { var i: usize = 1; @@ -182,7 +182,7 @@ const Pull = struct { pub fn sort( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c @@ -813,7 +813,7 @@ fn mergeInPlace( items: []T, A_arg: Range, B_arg: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, 
) void { if (A_arg.length() == 0 or B_arg.length() == 0) return; @@ -862,7 +862,7 @@ fn mergeInternal( items: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, buffer: Range, ) void { @@ -906,7 +906,7 @@ fn findFirstForward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -928,7 +928,7 @@ fn findFirstBackward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -950,7 +950,7 @@ fn findLastForward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -972,7 +972,7 @@ fn findLastBackward( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { @@ -994,7 +994,7 @@ fn binaryFirst( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; @@ -1017,7 +1017,7 @@ fn binaryLast( items: []T, value: T, range: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; @@ -1040,7 +1040,7 @@ fn mergeInto( from: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, into: []T, ) void { @@ -1078,7 +1078,7 @@ fn mergeExternal( items: []T, A: Range, B: Range, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, cache: []T, ) void { @@ -1112,7 +1112,7 @@ fn mergeExternal( fn swap( comptime T: type, items: []T, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, order: *[8]u8, x: usize, @@ -1358,7 +1358,7 @@ fn fuzzTest(rng: *std.rand.Random) !void { pub fn argMin( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { @@ -1390,7 +1390,7 @@ test "argMin" { pub fn min( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMin(T, items, context, lessThan) orelse return null; @@ -1410,7 +1410,7 @@ test "min" { pub fn argMax( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { @@ -1442,7 +1442,7 @@ test "argMax" { pub fn max( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMax(T, items, context, lessThan) orelse return null; @@ -1462,7 +1462,7 @@ test "max" { pub fn isSorted( comptime T: type, items: []const T, - context: var, + context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) bool { var i: usize = 1; diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig index 1c88f98e6e..83c181e841 100644 --- a/lib/std/special/build_runner.zig +++ b/lib/std/special/build_runner.zig @@ -135,7 +135,7 @@ fn runBuild(builder: *Builder) anyerror!void { } } -fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { +fn 
usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void { // run the build script to collect the options if (!already_ran_build) { builder.setInstallPrefix(null); @@ -202,7 +202,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { ); } -fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) void { +fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void { usage(builder, already_ran_build, out_stream) catch {}; process.exit(1); } diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index fd8c068f05..301457dde0 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -79,9 +79,9 @@ pub fn log( comptime message_level: std.log.Level, comptime scope: @Type(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) { - std.debug.print("[{}] ({}): " ++ format, .{@tagName(scope), @tagName(message_level)} ++ args); + std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args); } } diff --git a/lib/std/target.zig b/lib/std/target.zig index 110b7a088f..0b95e8f75a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -108,23 +108,16 @@ pub const Target = struct { self: WindowsVersion, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) !void { - if (fmt.len > 0 and fmt[0] == 's') { - if ( - @enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1) - ) { + if (fmt.len > 0 and fmt[0] == 's') { + if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) { try std.fmt.format(out_stream, ".{}", .{@tagName(self)}); } else { - try std.fmt.format(out_stream, - "@intToEnum(Target.Os.WindowsVersion, {})", - .{ @enumToInt(self) } - ); + try std.fmt.format(out_stream, "@intToEnum(Target.Os.WindowsVersion, {})", .{@enumToInt(self)}); } } else { - if ( - @enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1) - ) { + if (@enumToInt(self) >= @enumToInt(WindowsVersion.nt4) and @enumToInt(self) <= @enumToInt(WindowsVersion.win10_19h1)) { try std.fmt.format(out_stream, "WindowsVersion.{}", .{@tagName(self)}); } else { try std.fmt.format(out_stream, "WindowsVersion(", .{@typeName(@This())}); @@ -1189,7 +1182,7 @@ pub const Target = struct { pub fn standardDynamicLinkerPath(self: Target) DynamicLinker { var result: DynamicLinker = .{}; const S = struct { - fn print(r: *DynamicLinker, comptime fmt: []const u8, args: var) DynamicLinker { + fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker { r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1); return r.*; } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index bdaf759d62..44c221d76a 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -19,7 +19,7 @@ pub var log_level = std.log.Level.warn; /// This function is intended to be used only in tests. It prints diagnostics to stderr /// and then aborts when actual_error_union is not expected_error. 
-pub fn expectError(expected_error: anyerror, actual_error_union: var) void { +pub fn expectError(expected_error: anyerror, actual_error_union: anytype) void { if (actual_error_union) |actual_payload| { std.debug.panic("expected error.{}, found {}", .{ @errorName(expected_error), actual_payload }); } else |actual_error| { @@ -36,7 +36,7 @@ pub fn expectError(expected_error: anyerror, actual_error_union: var) void { /// equal, prints diagnostics to stderr to show exactly how they are not equal, /// then aborts. /// The types must match exactly. -pub fn expectEqual(expected: var, actual: @TypeOf(expected)) void { +pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void { switch (@typeInfo(@TypeOf(actual))) { .NoReturn, .BoundFn, diff --git a/lib/std/thread.zig b/lib/std/thread.zig index d07c41c5b0..3d20f54558 100644 --- a/lib/std/thread.zig +++ b/lib/std/thread.zig @@ -143,7 +143,7 @@ pub const Thread = struct { /// fn startFn(@TypeOf(context)) T /// where T is u8, noreturn, void, or !void /// caller must call wait on the returned thread - pub fn spawn(context: var, comptime startFn: var) SpawnError!*Thread { + pub fn spawn(context: anytype, comptime startFn: anytype) SpawnError!*Thread { if (builtin.single_threaded) @compileError("cannot spawn thread when building in single-threaded mode"); // TODO compile-time call graph analysis to determine stack upper bound // https://github.com/ziglang/zig/issues/157 diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig index e95a8855af..03153f541a 100644 --- a/lib/std/zig/ast.zig +++ b/lib/std/zig/ast.zig @@ -29,7 +29,7 @@ pub const Tree = struct { self.arena.promote(self.gpa).deinit(); } - pub fn renderError(self: *Tree, parse_error: *const Error, stream: var) !void { + pub fn renderError(self: *Tree, parse_error: *const Error, stream: anytype) !void { return parse_error.render(self.token_ids, stream); } @@ -167,7 +167,7 @@ pub const Error = union(enum) { DeclBetweenFields: DeclBetweenFields, InvalidAnd: InvalidAnd, - pub fn render(self: *const Error, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const Error, tokens: []const Token.Id, stream: anytype) !void { switch (self.*) { .InvalidToken => |*x| return x.render(tokens, stream), .ExpectedContainerMembers => |*x| return x.render(tokens, stream), @@ -322,7 +322,7 @@ pub const Error = union(enum) { pub const ExpectedCall = struct { node: *Node, - pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCall, tokens: []const Token.Id, stream: anytype) !void { return stream.print("expected " ++ @tagName(Node.Id.Call) ++ ", found {}", .{ @tagName(self.node.id), }); @@ -332,7 +332,7 @@ pub const Error = union(enum) { pub const ExpectedCallOrFnProto = struct { node: *Node, - pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCallOrFnProto, tokens: []const Token.Id, stream: anytype) !void { return stream.print("expected " ++ @tagName(Node.Id.Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", .{@tagName(self.node.id)}); } @@ -342,7 +342,7 @@ pub const Error = union(enum) { token: TokenIndex, expected_id: Token.Id, - pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedToken, tokens: []const Token.Id, stream: anytype) !void { const found_token = tokens[self.token]; switch (found_token) { .Invalid => { @@ -360,7 +360,7 @@ pub const Error = 
union(enum) { token: TokenIndex, end_id: Token.Id, - pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ExpectedCommaOrEnd, tokens: []const Token.Id, stream: anytype) !void { const actual_token = tokens[self.token]; return stream.print("expected ',' or '{}', found '{}'", .{ self.end_id.symbol(), @@ -375,7 +375,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void { const actual_token = tokens[self.token]; return stream.print(msg, .{actual_token.symbol()}); } @@ -388,7 +388,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: var) !void { + pub fn render(self: *const ThisError, tokens: []const Token.Id, stream: anytype) !void { return stream.writeAll(msg); } }; diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index 4f8eff3c68..29b60c0cee 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -2955,7 +2955,7 @@ const Parser = struct { const NodeParseFn = fn (p: *Parser) Error!?*Node; - fn ListParseFn(comptime E: type, comptime nodeParseFn: var) ParseFn([]E) { + fn ListParseFn(comptime E: type, comptime nodeParseFn: anytype) ParseFn([]E) { return struct { pub fn parse(p: *Parser) ![]E { var list = std.ArrayList(E).init(p.gpa); diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig index cc6030ad15..f7ceee16ac 100644 --- a/lib/std/zig/string_literal.zig +++ b/lib/std/zig/string_literal.zig @@ -125,7 +125,7 @@ test "parse" { } /// Writes a Zig-syntax escaped string literal to the stream. Includes the double quotes. 
-pub fn render(utf8: []const u8, out_stream: var) !void { +pub fn render(utf8: []const u8, out_stream: anytype) !void { try out_stream.writeByte('"'); for (utf8) |byte| switch (byte) { '\n' => try out_stream.writeAll("\\n"), diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 898d376cc7..af494efbab 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -130,7 +130,7 @@ pub const NativePaths = struct { return self.appendArray(&self.include_dirs, s); } - pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.include_dirs.allocator, fmt, args); errdefer self.include_dirs.allocator.free(item); try self.include_dirs.append(item); @@ -140,7 +140,7 @@ pub const NativePaths = struct { return self.appendArray(&self.lib_dirs, s); } - pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.lib_dirs.allocator, fmt, args); errdefer self.lib_dirs.allocator.free(item); try self.lib_dirs.append(item); @@ -150,7 +150,7 @@ pub const NativePaths = struct { return self.appendArray(&self.warnings, s); } - pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: var) !void { + pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void { const item = try std.fmt.allocPrint0(self.warnings.allocator, fmt, args); errdefer self.warnings.allocator.free(item); try self.warnings.append(item); @@ -887,7 +887,7 @@ pub const NativeTargetInfo = struct { abi: Target.Abi, }; - pub fn elfInt(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) { + pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(@TypeOf(int_64), int_64); diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig index a6508943df..b0678ea665 100644 --- a/src-self-hosted/Module.zig +++ b/src-self-hosted/Module.zig @@ -3575,7 +3575,7 @@ fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *I return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: var) InnerError { +fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError { @setCold(true); const err_msg = try ErrorMsg.create(self.gpa, src, format, args); return self.failWithOwnedErrorMsg(scope, src, err_msg); @@ -3586,7 +3586,7 @@ fn failTok( scope: *Scope, token_index: ast.TokenIndex, comptime format: []const u8, - args: var, + args: anytype, ) InnerError { @setCold(true); const src = scope.tree().token_locs[token_index].start; @@ -3598,7 +3598,7 @@ fn failNode( scope: *Scope, ast_node: *ast.Node, comptime format: []const u8, - args: var, + args: anytype, ) InnerError { @setCold(true); const src = scope.tree().token_locs[ast_node.firstToken()].start; @@ -3662,7 +3662,7 @@ pub const ErrorMsg = struct { byte_offset: usize, msg: []const u8, - pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !*ErrorMsg { + pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg { const self = try gpa.create(ErrorMsg); 
errdefer gpa.destroy(self); self.* = try init(gpa, byte_offset, format, args); @@ -3675,7 +3675,7 @@ pub const ErrorMsg = struct { gpa.destroy(self); } - pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !ErrorMsg { + pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg { return ErrorMsg{ .byte_offset = byte_offset, .msg = try std.fmt.allocPrint(gpa, format, args), diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 7ed84d540c..ba91e0726f 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -230,7 +230,7 @@ pub fn generateSymbol( } } -const InnerError = error { +const InnerError = error{ OutOfMemory, CodegenFail, }; @@ -673,9 +673,9 @@ const Function = struct { try self.genX8664BinMathCode(inst.base.src, dst_mcv, src_mcv, 7, 0x38); const info = inst.args.lhs.ty.intInfo(self.target.*); if (info.signed) { - return MCValue{.compare_flags_signed = inst.args.op}; + return MCValue{ .compare_flags_signed = inst.args.op }; } else { - return MCValue{.compare_flags_unsigned = inst.args.op}; + return MCValue{ .compare_flags_unsigned = inst.args.op }; } }, else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}), @@ -721,7 +721,7 @@ const Function = struct { } fn genX86CondBr(self: *Function, inst: *ir.Inst.CondBr, opcode: u8, comptime arch: std.Target.Cpu.Arch) !MCValue { - self.code.appendSliceAssumeCapacity(&[_]u8{0x0f, opcode}); + self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); const reloc = Reloc{ .rel32 = self.code.items.len }; self.code.items.len += 4; try self.genBody(inst.args.true_body, arch); @@ -1081,10 +1081,12 @@ const Function = struct { switch (mcv) { .immediate => |imm| { // This immediate is unsigned. 
- const U = @Type(.{ .Int = .{ - .bits = ti.bits - @boolToInt(ti.is_signed), - .is_signed = false, - }}); + const U = @Type(.{ + .Int = .{ + .bits = ti.bits - @boolToInt(ti.is_signed), + .is_signed = false, + }, + }); if (imm >= std.math.maxInt(U)) { return self.copyToNewRegister(inst); } @@ -1094,7 +1096,6 @@ const Function = struct { return mcv; } - fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -1121,7 +1122,7 @@ const Function = struct { } } - fn fail(self: *Function, src: usize, comptime format: []const u8, args: var) error{ CodegenFail, OutOfMemory } { + fn fail(self: *Function, src: usize, comptime format: []const u8, args: anytype) error{ CodegenFail, OutOfMemory } { @setCold(true); assert(self.err_msg == null); self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args); diff --git a/src-self-hosted/dep_tokenizer.zig b/src-self-hosted/dep_tokenizer.zig index cad12834a7..20324cbf0c 100644 --- a/src-self-hosted/dep_tokenizer.zig +++ b/src-self-hosted/dep_tokenizer.zig @@ -299,12 +299,12 @@ pub const Tokenizer = struct { return null; } - fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: var) Error { + fn errorf(self: *Tokenizer, comptime fmt: []const u8, args: anytype) Error { self.error_text = try std.fmt.allocPrintZ(&self.arena.allocator, fmt, args); return Error.InvalidInput; } - fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: var) Error { + fn errorPosition(self: *Tokenizer, position: usize, bytes: []const u8, comptime fmt: []const u8, args: anytype) Error { var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); try buffer.outStream().print(fmt, args); try buffer.appendSlice(" '"); @@ -316,7 +316,7 @@ pub const Tokenizer = struct { return Error.InvalidInput; } - fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: var) Error { + fn errorIllegalChar(self: *Tokenizer, position: usize, char: u8, comptime fmt: []const u8, args: anytype) Error { var buffer = try std.ArrayListSentineled(u8, 0).initSize(&self.arena.allocator, 0); try buffer.appendSlice("illegal char "); try printUnderstandableChar(&buffer, char); @@ -883,7 +883,7 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void { testing.expect(false); } -fn printSection(out: var, label: []const u8, bytes: []const u8) !void { +fn printSection(out: anytype, label: []const u8, bytes: []const u8) !void { try printLabel(out, label, bytes); try hexDump(out, bytes); try printRuler(out); @@ -891,7 +891,7 @@ fn printSection(out: var, label: []const u8, bytes: []const u8) !void { try out.write("\n"); } -fn printLabel(out: var, label: []const u8, bytes: []const u8) !void { +fn printLabel(out: anytype, label: []const u8, bytes: []const u8) !void { var buf: [80]u8 = undefined; var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len }); try out.write(text); @@ -903,7 +903,7 @@ fn printLabel(out: var, label: []const u8, bytes: []const u8) !void { try out.write("\n"); } -fn printRuler(out: var) !void { +fn printRuler(out: anytype) !void { var i: usize = 0; const end = 79; while (i < 79) : (i += 1) { @@ -912,7 +912,7 @@ fn printRuler(out: var) !void { try out.write("\n"); } -fn hexDump(out: var, bytes: []const u8) !void { +fn hexDump(out: anytype, bytes: []const u8) !void { const n16 = bytes.len >> 4; var line: usize = 0; var 
offset: usize = 0; @@ -959,7 +959,7 @@ fn hexDump(out: var, bytes: []const u8) !void { try out.write("\n"); } -fn hexDump16(out: var, offset: usize, bytes: []const u8) !void { +fn hexDump16(out: anytype, offset: usize, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.write(":"); try out.write(" "); @@ -977,19 +977,19 @@ fn hexDump16(out: var, offset: usize, bytes: []const u8) !void { try out.write("|\n"); } -fn printDecValue(out: var, value: u64, width: u8) !void { +fn printDecValue(out: anytype, value: u64, width: u8) !void { var buffer: [20]u8 = undefined; const len = std.fmt.formatIntBuf(buffer[0..], value, 10, false, width); try out.write(buffer[0..len]); } -fn printHexValue(out: var, value: u64, width: u8) !void { +fn printHexValue(out: anytype, value: u64, width: u8) !void { var buffer: [16]u8 = undefined; const len = std.fmt.formatIntBuf(buffer[0..], value, 16, false, width); try out.write(buffer[0..len]); } -fn printCharValues(out: var, bytes: []const u8) !void { +fn printCharValues(out: anytype, bytes: []const u8) !void { for (bytes) |b| { try out.write(&[_]u8{printable_char_tab[b]}); } @@ -1020,13 +1020,13 @@ comptime { // output: must be a function that takes a `self` idiom parameter // and a bytes parameter // context: must be that self -fn makeOutput(comptime output: var, context: var) Output(output, @TypeOf(context)) { +fn makeOutput(comptime output: anytype, context: anytype) Output(output, @TypeOf(context)) { return Output(output, @TypeOf(context)){ .context = context, }; } -fn Output(comptime output_func: var, comptime Context: type) type { +fn Output(comptime output_func: anytype, comptime Context: type) type { return struct { context: Context, diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 1de7c626ea..094b877b53 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -13,7 +13,7 @@ const codegen = @import("codegen.zig"); pub const Inst = struct { tag: Tag, /// Each bit represents the index of an `Inst` parameter in the `args` field. - /// If a bit is set, it marks the end of the lifetime of the corresponding + /// If a bit is set, it marks the end of the lifetime of the corresponding /// instruction parameter. For example, 0b000_00101 means that the first and /// third `Inst` parameters' lifetimes end after this instruction, and will /// not have any more following references. 
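For illustration only, not part of the patch: several hunks above (std.net.Address, std.os.uefi.Guid, Target.Os.WindowsVersion) retype the `out_stream` parameter of custom `format` methods from `var` to `anytype`. A minimal sketch of such a method under the same convention follows; the `Point` type here is hypothetical and not taken from the tree.

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub fn format(
        self: Point,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: anytype, // previously `var`; any writer-like type is accepted
    ) !void {
        try std.fmt.format(out_stream, "({},{})", .{ self.x, self.y });
    }
};

test "std.fmt dispatches to the custom format method" {
    var buf: [32]u8 = undefined;
    const rendered = try std.fmt.bufPrint(buf[0..], "{}", .{Point{ .x = 1, .y = 2 }});
    std.testing.expect(std.mem.eql(u8, rendered, "(1,2)"));
}

Because `out_stream` is duck-typed, the same method serves buffered writers, files, and counting writers alike, which is the property the rename preserves.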
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig index dfc0f1235a..65c6c8c16d 100644 --- a/src-self-hosted/libc_installation.zig +++ b/src-self-hosted/libc_installation.zig @@ -37,7 +37,7 @@ pub const LibCInstallation = struct { pub fn parse( allocator: *Allocator, libc_file: []const u8, - stderr: var, + stderr: anytype, ) !LibCInstallation { var self: LibCInstallation = .{}; @@ -115,7 +115,7 @@ pub const LibCInstallation = struct { return self; } - pub fn render(self: LibCInstallation, out: var) !void { + pub fn render(self: LibCInstallation, out: anytype) !void { @setEvalBranchQuota(4000); const include_dir = self.include_dir orelse ""; const sys_include_dir = self.sys_include_dir orelse ""; diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index 2fb14ab690..b664d93353 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -244,7 +244,7 @@ pub const File = struct { need_noreturn: bool = false, error_msg: *Module.ErrorMsg = undefined, - pub fn fail(self: *C, src: usize, comptime format: []const u8, args: var) !void { + pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) !void { self.error_msg = try Module.ErrorMsg.create(self.allocator, src, format, args); return error.CGenFailure; } @@ -1167,10 +1167,10 @@ pub const File = struct { try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len); if (self.local_symbol_free_list.popOrNull()) |i| { - std.log.debug(.link, "reusing symbol index {} for {}\n", .{i, decl.name}); + std.log.debug(.link, "reusing symbol index {} for {}\n", .{ i, decl.name }); decl.link.local_sym_index = i; } else { - std.log.debug(.link, "allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name}); + std.log.debug(.link, "allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name }); decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len); _ = self.local_symbols.addOneAssumeCapacity(); } @@ -1657,7 +1657,7 @@ fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !Fil } /// Saturating multiplication -fn satMul(a: var, b: var) @TypeOf(a, b) { +fn satMul(a: anytype, b: anytype) @TypeOf(a, b) { const T = @TypeOf(a, b); return std.math.mul(T, a, b) catch std.math.maxInt(T); } diff --git a/src-self-hosted/liveness.zig b/src-self-hosted/liveness.zig index 28eb2145c7..797a55a80c 100644 --- a/src-self-hosted/liveness.zig +++ b/src-self-hosted/liveness.zig @@ -135,5 +135,5 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void } } - std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{inst.base.tag, inst.base.deaths}); + std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ inst.base.tag, inst.base.deaths }); } diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index ee60b600a4..c0ac42c845 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -42,7 +42,7 @@ pub fn log( comptime level: std.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, - args: var, + args: anytype, ) void { if (@enumToInt(level) > @enumToInt(std.log.level)) return; diff --git a/src-self-hosted/print_targets.zig b/src-self-hosted/print_targets.zig index f84a4a2cbc..34eda71ccf 100644 --- a/src-self-hosted/print_targets.zig +++ b/src-self-hosted/print_targets.zig @@ -62,7 +62,7 @@ pub fn cmdTargets( allocator: *Allocator, args: []const []const u8, /// Output stream - stdout: var, + stdout: anytype, native_target: 
Target, ) !void { const available_glibcs = blk: { diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig index 261cef37b5..396ee09ef9 100644 --- a/src-self-hosted/translate_c.zig +++ b/src-self-hosted/translate_c.zig @@ -1117,7 +1117,7 @@ fn transEnumDecl(c: *Context, enum_decl: *const ZigClangEnumDecl) Error!?*ast.No return transCreateNodeIdentifier(c, name); } -fn createAlias(c: *Context, alias: var) !void { +fn createAlias(c: *Context, alias: anytype) !void { const node = try transCreateNodeVarDecl(c, true, true, alias.alias); node.eq_token = try appendToken(c, .Equal, "="); node.init_node = try transCreateNodeIdentifier(c, alias.name); @@ -2161,7 +2161,7 @@ fn transCreateNodeArrayType( rp: RestorePoint, source_loc: ZigClangSourceLocation, ty: *const ZigClangType, - len: var, + len: anytype, ) TransError!*ast.Node { var node = try transCreateNodePrefixOp( rp.c, @@ -4187,7 +4187,7 @@ fn transCreateNodeBoolLiteral(c: *Context, value: bool) !*ast.Node { return &node.base; } -fn transCreateNodeInt(c: *Context, int: var) !*ast.Node { +fn transCreateNodeInt(c: *Context, int: anytype) !*ast.Node { const token = try appendTokenFmt(c, .IntegerLiteral, "{}", .{int}); const node = try c.arena.create(ast.Node.IntegerLiteral); node.* = .{ @@ -4196,7 +4196,7 @@ fn transCreateNodeInt(c: *Context, int: var) !*ast.Node { return &node.base; } -fn transCreateNodeFloat(c: *Context, int: var) !*ast.Node { +fn transCreateNodeFloat(c: *Context, int: anytype) !*ast.Node { const token = try appendTokenFmt(c, .FloatLiteral, "{}", .{int}); const node = try c.arena.create(ast.Node.FloatLiteral); node.* = .{ @@ -4907,22 +4907,22 @@ fn finishTransFnProto( fn revertAndWarn( rp: RestorePoint, - err: var, + err: anytype, source_loc: ZigClangSourceLocation, comptime format: []const u8, - args: var, + args: anytype, ) (@TypeOf(err) || error{OutOfMemory}) { rp.activate(); try emitWarning(rp.c, source_loc, format, args); return err; } -fn emitWarning(c: *Context, loc: ZigClangSourceLocation, comptime format: []const u8, args: var) !void { +fn emitWarning(c: *Context, loc: ZigClangSourceLocation, comptime format: []const u8, args: anytype) !void { const args_prefix = .{c.locStr(loc)}; _ = try appendTokenFmt(c, .LineComment, "// {}: warning: " ++ format, args_prefix ++ args); } -pub fn failDecl(c: *Context, loc: ZigClangSourceLocation, name: []const u8, comptime format: []const u8, args: var) !void { +pub fn failDecl(c: *Context, loc: ZigClangSourceLocation, name: []const u8, comptime format: []const u8, args: anytype) !void { // pub const name = @compileError(msg); const pub_tok = try appendToken(c, .Keyword_pub, "pub"); const const_tok = try appendToken(c, .Keyword_const, "const"); @@ -4973,7 +4973,7 @@ fn appendToken(c: *Context, token_id: Token.Id, bytes: []const u8) !ast.TokenInd return appendTokenFmt(c, token_id, "{}", .{bytes}); } -fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: var) !ast.TokenIndex { +fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: anytype) !ast.TokenIndex { assert(token_id != .Invalid); try c.token_ids.ensureCapacity(c.gpa, c.token_ids.items.len + 1); diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index caacf4e7fc..82c7cfa607 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -277,7 +277,7 @@ pub const Type = extern union { self: Type, comptime fmt: []const u8, options: std.fmt.FormatOptions, - out_stream: var, + out_stream: anytype, ) 
@TypeOf(out_stream).Error!void {
         comptime assert(fmt.len == 0);
         var ty = self;
@@ -591,7 +591,6 @@ pub const Type = extern union {
             .anyerror => return 2, // TODO revisit this when we have the concept of the error tag type
-
             .int_signed, .int_unsigned => {
                 const bits: u16 = if (self.cast(Payload.IntSigned)) |pl|
                     pl.bits
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index 6509ee52f6..c1e9a38bd1 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -227,7 +227,7 @@ pub const Value = extern union {
         self: Value,
         comptime fmt: []const u8,
         options: std.fmt.FormatOptions,
-        out_stream: var,
+        out_stream: anytype,
     ) !void {
         comptime assert(fmt.len == 0);
         var val = self;
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 2a4db02c19..8faf248636 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -655,7 +655,7 @@ pub const Module = struct {

     /// The allocator is used for temporary storage, but this function always returns
     /// with no resources allocated.
-    pub fn writeToStream(self: Module, allocator: *Allocator, stream: var) !void {
+    pub fn writeToStream(self: Module, allocator: *Allocator, stream: anytype) !void {
         var write = Writer{
             .module = &self,
             .inst_table = InstPtrTable.init(allocator),
@@ -686,7 +686,6 @@ pub const Module = struct {
             try stream.writeByte('\n');
         }
     }
-
 };

 const InstPtrTable = std.AutoHashMap(*Inst, struct { inst: *Inst, index: ?usize, name: []const u8 });
@@ -700,7 +699,7 @@ const Writer = struct {

     fn writeInstToStream(
         self: *Writer,
-        stream: var,
+        stream: anytype,
         inst: *Inst,
     ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
         // TODO I tried implementing this with an inline for loop and hit a compiler bug
@@ -746,7 +745,7 @@ const Writer = struct {

     fn writeInstToStreamGeneric(
         self: *Writer,
-        stream: var,
+        stream: anytype,
         comptime inst_tag: Inst.Tag,
         base: *Inst,
     ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
@@ -783,7 +782,7 @@ const Writer = struct {
         try stream.writeByte(')');
     }

-    fn writeParamToStream(self: *Writer, stream: var, param: var) !void {
+    fn writeParamToStream(self: *Writer, stream: anytype, param: anytype) !void {
         if (@typeInfo(@TypeOf(param)) == .Enum) {
             return stream.writeAll(@tagName(param));
         }
@@ -829,7 +828,7 @@ const Writer = struct {
         }
     }

-    fn writeInstParamToStream(self: *Writer, stream: var, inst: *Inst) !void {
+    fn writeInstParamToStream(self: *Writer, stream: anytype, inst: *Inst) !void {
         if (self.inst_table.get(inst)) |info| {
             if (info.index) |i| {
                 try stream.print("%{}", .{info.index});
@@ -1062,7 +1061,7 @@ const Parser = struct {
         }
     }

-    fn fail(self: *Parser, comptime format: []const u8, args: var) InnerError {
+    fn fail(self: *Parser, comptime format: []const u8, args: anytype) InnerError {
         @setCold(true);
         self.error_msg = ErrorMsg{
             .byte_offset = self.i,
diff --git a/test/stage1/behavior/async_fn.zig b/test/stage1/behavior/async_fn.zig
index 4214ed84d2..807e4c6275 100644
--- a/test/stage1/behavior/async_fn.zig
+++ b/test/stage1/behavior/async_fn.zig
@@ -1016,7 +1016,7 @@ test "@asyncCall using the result location inside the frame" {

 test "@TypeOf an async function call of generic fn with error union type" {
     const S = struct {
-        fn func(comptime x: var) anyerror!i32 {
+        fn func(comptime x: anytype) anyerror!i32 {
             const T = @TypeOf(async func(x));
             comptime expect(T == @TypeOf(@frame()).Child);
             return undefined;
@@ -1032,7 +1032,7 @@ test "using @TypeOf on a generic function call" {

         var buf: [100]u8 align(16) = undefined;

-        fn amain(x: var) void {
+        fn amain(x: anytype) void {
             if (x == 0) {
                 global_ok = true;
                 return;
@@ -1057,7 +1057,7 @@ test "recursive call of await @asyncCall with struct return type" {

         var buf: [100]u8 align(16) = undefined;

-        fn amain(x: var) Foo {
+        fn amain(x: anytype) Foo {
             if (x == 0) {
                 global_ok = true;
                 return Foo{ .x = 1, .y = 2, .z = 3 };
@@ -1336,7 +1336,7 @@ test "async function passed 0-bit arg after non-0-bit arg" {
             bar(1, .{}) catch unreachable;
         }

-        fn bar(x: i32, args: var) anyerror!void {
+        fn bar(x: i32, args: anytype) anyerror!void {
             global_frame = @frame();
             suspend;
             global_int = x;
@@ -1357,7 +1357,7 @@ test "async function passed align(16) arg after align(8) arg" {
             bar(10, .{a}) catch unreachable;
         }

-        fn bar(x: u64, args: var) anyerror!void {
+        fn bar(x: u64, args: anytype) anyerror!void {
             expect(x == 10);
             global_frame = @frame();
             suspend;
diff --git a/test/stage1/behavior/bitcast.zig b/test/stage1/behavior/bitcast.zig
index 009f4544ba..2a86044dc1 100644
--- a/test/stage1/behavior/bitcast.zig
+++ b/test/stage1/behavior/bitcast.zig
@@ -171,7 +171,7 @@ test "nested bitcast" {

 test "bitcast passed as tuple element" {
     const S = struct {
-        fn foo(args: var) void {
+        fn foo(args: anytype) void {
             comptime expect(@TypeOf(args[0]) == f32);
             expect(args[0] == 12.34);
         }
@@ -181,7 +181,7 @@ test "triple level result location with bitcast sandwich passed as tuple element" {
     const S = struct {
-        fn foo(args: var) void {
+        fn foo(args: anytype) void {
             comptime expect(@TypeOf(args[0]) == f64);
             expect(args[0] > 12.33 and args[0] < 12.35);
         }
diff --git a/test/stage1/behavior/bugs/2114.zig b/test/stage1/behavior/bugs/2114.zig
index ab32a22cf3..1034a256d3 100644
--- a/test/stage1/behavior/bugs/2114.zig
+++ b/test/stage1/behavior/bugs/2114.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 const expect = std.testing.expect;
 const math = std.math;

-fn ctz(x: var) usize {
+fn ctz(x: anytype) usize {
     return @ctz(@TypeOf(x), x);
 }

diff --git a/test/stage1/behavior/bugs/3742.zig b/test/stage1/behavior/bugs/3742.zig
index f09127a66f..bf6e1f5207 100644
--- a/test/stage1/behavior/bugs/3742.zig
+++ b/test/stage1/behavior/bugs/3742.zig
@@ -23,7 +23,7 @@ pub fn isCommand(comptime T: type) bool {
 }

 pub const ArgSerializer = struct {
-    pub fn serializeCommand(command: var) void {
+    pub fn serializeCommand(command: anytype) void {
         const CmdT = @TypeOf(command);

         if (comptime isCommand(CmdT)) {
diff --git a/test/stage1/behavior/bugs/4328.zig b/test/stage1/behavior/bugs/4328.zig
index 0196af1748..98ab7bd155 100644
--- a/test/stage1/behavior/bugs/4328.zig
+++ b/test/stage1/behavior/bugs/4328.zig
@@ -17,11 +17,11 @@ const S = extern struct {

 test "Extern function calls in @TypeOf" {
     const Test = struct {
-        fn test_fn_1(a: var, b: var) @TypeOf(printf("%d %s\n", a, b)) {
+        fn test_fn_1(a: anytype, b: anytype) @TypeOf(printf("%d %s\n", a, b)) {
             return 0;
         }

-        fn test_fn_2(a: var) @TypeOf((S{ .state = 0 }).s_do_thing(a)) {
+        fn test_fn_2(a: anytype) @TypeOf((S{ .state = 0 }).s_do_thing(a)) {
             return 1;
         }

@@ -56,7 +56,7 @@ test "Extern function calls, dereferences and field access in @TypeOf" {
             return .{ .dummy_field = 0 };
         }

-        fn test_fn_2(a: var) @TypeOf(fopen("test", "r").*.dummy_field) {
+        fn test_fn_2(a: anytype) @TypeOf(fopen("test", "r").*.dummy_field) {
             return 255;
         }

@@ -68,4 +68,4 @@ test "Extern function calls, dereferences and field access in @TypeOf" {

     Test.doTheTest();
     comptime Test.doTheTest();
-}
\ No newline at end of file
+}
diff --git a/test/stage1/behavior/bugs/4769_a.zig b/test/stage1/behavior/bugs/4769_a.zig
index ab0c01417a..8337712ea5 100644
--- a/test/stage1/behavior/bugs/4769_a.zig
+++ b/test/stage1/behavior/bugs/4769_a.zig
@@ -1 +1 @@
-//
\ No newline at end of file
+//
diff --git a/test/stage1/behavior/bugs/4769_b.zig b/test/stage1/behavior/bugs/4769_b.zig
index 23b2513f17..9d0f028e57 100644
--- a/test/stage1/behavior/bugs/4769_b.zig
+++ b/test/stage1/behavior/bugs/4769_b.zig
@@ -1 +1 @@
-//!
\ No newline at end of file
+//!
diff --git a/test/stage1/behavior/byval_arg_var.zig b/test/stage1/behavior/byval_arg_var.zig
index 3794a965c6..ec3d18a532 100644
--- a/test/stage1/behavior/byval_arg_var.zig
+++ b/test/stage1/behavior/byval_arg_var.zig
@@ -13,11 +13,11 @@ fn start() void {
     foo("string literal");
 }

-fn foo(x: var) void {
+fn foo(x: anytype) void {
     bar(x);
 }

-fn bar(x: var) void {
+fn bar(x: anytype) void {
     result = x;
 }

diff --git a/test/stage1/behavior/call.zig b/test/stage1/behavior/call.zig
index 40b5be4cd3..4d05a83a39 100644
--- a/test/stage1/behavior/call.zig
+++ b/test/stage1/behavior/call.zig
@@ -57,7 +57,7 @@ test "tuple parameters" {

 test "comptime call with bound function as parameter" {
     const S = struct {
-        fn ReturnType(func: var) type {
+        fn ReturnType(func: anytype) type {
             return switch (@typeInfo(@TypeOf(func))) {
                 .BoundFn => |info| info,
                 else => unreachable,
diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig
index b6cb86a363..765828f5ce 100644
--- a/test/stage1/behavior/enum.zig
+++ b/test/stage1/behavior/enum.zig
@@ -208,7 +208,7 @@ test "@tagName non-exhaustive enum" {
     comptime expect(mem.eql(u8, testEnumTagNameBare(NonExhaustive.B), "B"));
 }

-fn testEnumTagNameBare(n: var) []const u8 {
+fn testEnumTagNameBare(n: anytype) []const u8 {
     return @tagName(n);
 }

diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig
index def7fd679d..975e08b04f 100644
--- a/test/stage1/behavior/error.zig
+++ b/test/stage1/behavior/error.zig
@@ -227,7 +227,7 @@ test "error: Infer error set from literals" {
     _ = comptime intLiteral("n") catch |err| handleErrors(err);
 }

-fn handleErrors(err: var) noreturn {
+fn handleErrors(err: anytype) noreturn {
     switch (err) {
         error.T => {},
     }
diff --git a/test/stage1/behavior/eval.zig b/test/stage1/behavior/eval.zig
index 2af34eaf7e..17d5aafe06 100644
--- a/test/stage1/behavior/eval.zig
+++ b/test/stage1/behavior/eval.zig
@@ -670,10 +670,10 @@ fn loopNTimes(comptime n: usize) void {
 }

 test "variable inside inline loop that has different types on different iterations" {
-    testVarInsideInlineLoop(.{true, @as(u32, 42)});
+    testVarInsideInlineLoop(.{ true, @as(u32, 42) });
 }

-fn testVarInsideInlineLoop(args: var) void {
+fn testVarInsideInlineLoop(args: anytype) void {
     comptime var i = 0;
     inline while (i < args.len) : (i += 1) {
         const x = args[i];
@@ -814,17 +814,16 @@ test "two comptime calls with array default initialized to undefined" {
             dynamic_linker: DynamicLinker = DynamicLinker{},

             pub fn parse() void {
-                var result: CrossTarget = .{ };
+                var result: CrossTarget = .{};
                 result.getCpuArch();
             }

-            pub fn getCpuArch(self: CrossTarget) void { }
+            pub fn getCpuArch(self: CrossTarget) void {}
         };

         const DynamicLinker = struct {
             buffer: [255]u8 = undefined,
         };
-
     };

     comptime {
diff --git a/test/stage1/behavior/fn.zig b/test/stage1/behavior/fn.zig
index c1e5459378..c9f7477ecf 100644
--- a/test/stage1/behavior/fn.zig
+++ b/test/stage1/behavior/fn.zig
@@ -104,7 +104,7 @@ test "number literal as an argument" {
     comptime numberLiteralArg(3);
 }

-fn numberLiteralArg(a: var) void {
+fn numberLiteralArg(a: anytype) void {
     expect(a == 3);
 }

@@ -132,7 +132,7 @@ test "pass by non-copying value through var arg" {
     expect(addPointCoordsVar(Point{ .x = 1, .y = 2 }) == 3);
 }

-fn addPointCoordsVar(pt: var) i32 {
+fn addPointCoordsVar(pt: anytype) i32 {
     comptime expect(@TypeOf(pt) == Point);
     return pt.x + pt.y;
 }
@@ -267,7 +267,7 @@ test "ability to give comptime types and non comptime types to same parameter" {
         expect(foo(i32) == 20);
     }

-    fn foo(arg: var) i32 {
+    fn foo(arg: anytype) i32 {
         if (@typeInfo(@TypeOf(arg)) == .Type and arg == i32) return 20;
         return 9 + arg;
     }
diff --git a/test/stage1/behavior/generics.zig b/test/stage1/behavior/generics.zig
index a5d2f9dabe..6b584e381d 100644
--- a/test/stage1/behavior/generics.zig
+++ b/test/stage1/behavior/generics.zig
@@ -47,7 +47,7 @@ comptime {
     expect(max_f64(1.2, 3.4) == 3.4);
 }

-fn max_var(a: var, b: var) @TypeOf(a + b) {
+fn max_var(a: anytype, b: anytype) @TypeOf(a + b) {
     return if (a > b) a else b;
 }

@@ -133,15 +133,15 @@ fn getFirstByte(comptime T: type, mem: []const T) u8 {
     return getByte(@ptrCast(*const u8, &mem[0]));
 }

-const foos = [_]fn (var) bool{
+const foos = [_]fn (anytype) bool{
     foo1,
     foo2,
 };

-fn foo1(arg: var) bool {
+fn foo1(arg: anytype) bool {
     return arg;
 }

-fn foo2(arg: var) bool {
+fn foo2(arg: anytype) bool {
     return !arg;
 }

diff --git a/test/stage1/behavior/optional.zig b/test/stage1/behavior/optional.zig
index 0003bb86e1..1dc33eb8ea 100644
--- a/test/stage1/behavior/optional.zig
+++ b/test/stage1/behavior/optional.zig
@@ -67,8 +67,20 @@ fn test_cmp_optional_non_optional() void {
     // test evaluation is always lexical
     // ensure that the optional isn't always computed before the non-optional
     var mutable_state: i32 = 0;
-    _ = blk1: { mutable_state += 1; break :blk1 @as(?f64, 10.0); } != blk2: { expect(mutable_state == 1); break :blk2 @as(f64, 5.0); };
-    _ = blk1: { mutable_state += 1; break :blk1 @as(f64, 10.0); } != blk2: { expect(mutable_state == 2); break :blk2 @as(?f64, 5.0); };
+    _ = blk1: {
+        mutable_state += 1;
+        break :blk1 @as(?f64, 10.0);
+    } != blk2: {
+        expect(mutable_state == 1);
+        break :blk2 @as(f64, 5.0);
+    };
+    _ = blk1: {
+        mutable_state += 1;
+        break :blk1 @as(f64, 10.0);
+    } != blk2: {
+        expect(mutable_state == 2);
+        break :blk2 @as(?f64, 5.0);
+    };
 }

 test "passing an optional integer as a parameter" {
diff --git a/test/stage1/behavior/struct.zig b/test/stage1/behavior/struct.zig
index 7b8690d604..2d83bd23bb 100644
--- a/test/stage1/behavior/struct.zig
+++ b/test/stage1/behavior/struct.zig
@@ -713,7 +713,7 @@ test "packed struct field passed to generic function" {
             a: u1,
         };

-        fn genericReadPackedField(ptr: var) u5 {
+        fn genericReadPackedField(ptr: anytype) u5 {
             return ptr.*;
         }
     };
@@ -754,7 +754,7 @@ test "fully anonymous struct" {
                 .s = "hi",
             });
         }
-        fn dump(args: var) void {
+        fn dump(args: anytype) void {
             expect(args.int == 1234);
             expect(args.float == 12.34);
             expect(args.b);
@@ -771,7 +771,7 @@ test "fully anonymous list literal" {
         fn doTheTest() void {
             dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" });
         }
-        fn dump(args: var) void {
+        fn dump(args: anytype) void {
             expect(args.@"0" == 1234);
             expect(args.@"1" == 12.34);
             expect(args.@"2");
@@ -792,8 +792,8 @@ test "anonymous struct literal assigned to variable" {

 test "struct with var field" {
     const Point = struct {
-        x: var,
-        y: var,
+        x: anytype,
+        y: anytype,
     };
     const pt = Point{
         .x = 1,
diff --git a/test/stage1/behavior/tuple.zig b/test/stage1/behavior/tuple.zig
index 0b2fdfe4e0..299abec3c9 100644
--- a/test/stage1/behavior/tuple.zig
+++ b/test/stage1/behavior/tuple.zig
@@ -42,7 +42,7 @@ test "tuple multiplication" {
     comptime S.doTheTest();

     const T = struct {
-        fn consume_tuple(tuple: var, len: usize) void {
+        fn consume_tuple(tuple: anytype, len: usize) void {
             expect(tuple.len == len);
         }

@@ -82,7 +82,7 @@ test "tuple multiplication" {

 test "pass tuple to comptime var parameter" {
     const S = struct {
-        fn Foo(comptime args: var) void {
+        fn Foo(comptime args: anytype) void {
             expect(args[0] == 1);
         }

diff --git a/test/stage1/behavior/type_info.zig b/test/stage1/behavior/type_info.zig
index 2685a3552e..48263dba6f 100644
--- a/test/stage1/behavior/type_info.zig
+++ b/test/stage1/behavior/type_info.zig
@@ -385,7 +385,7 @@ test "@typeInfo does not force declarations into existence" {
 }

 test "defaut value for a var-typed field" {
-    const S = struct { x: var };
+    const S = struct { x: anytype };
     expect(@typeInfo(S).Struct.fields[0].default_value == null);
 }

diff --git a/test/stage1/behavior/union.zig b/test/stage1/behavior/union.zig
index 8555e712dd..da898347b9 100644
--- a/test/stage1/behavior/union.zig
+++ b/test/stage1/behavior/union.zig
@@ -296,7 +296,7 @@ const TaggedUnionWithAVoid = union(enum) {
     B: i32,
 };

-fn testTaggedUnionInit(x: var) bool {
+fn testTaggedUnionInit(x: anytype) bool {
     const y = TaggedUnionWithAVoid{ .A = x };
     return @as(@TagType(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A;
 }
diff --git a/test/stage1/behavior/var_args.zig b/test/stage1/behavior/var_args.zig
index 0c8674a7fb..eae8f8f888 100644
--- a/test/stage1/behavior/var_args.zig
+++ b/test/stage1/behavior/var_args.zig
@@ -1,6 +1,6 @@
 const expect = @import("std").testing.expect;

-fn add(args: var) i32 {
+fn add(args: anytype) i32 {
     var sum = @as(i32, 0);
     {
         comptime var i: usize = 0;
@@ -17,7 +17,7 @@ test "add arbitrary args" {
     expect(add(.{}) == 0);
 }

-fn readFirstVarArg(args: var) void {
+fn readFirstVarArg(args: anytype) void {
     const value = args[0];
 }

@@ -31,7 +31,7 @@ test "pass args directly" {
     expect(addSomeStuff(.{}) == 0);
 }

-fn addSomeStuff(args: var) i32 {
+fn addSomeStuff(args: anytype) i32 {
     return add(args);
 }

@@ -47,7 +47,7 @@ test "runtime parameter before var args" {
     }
 }

-fn extraFn(extra: u32, args: var) usize {
+fn extraFn(extra: u32, args: anytype) usize {
     if (args.len >= 1) {
         expect(args[0] == false);
     }
@@ -57,15 +57,15 @@ fn extraFn(extra: u32, args: var) usize {
     return args.len;
 }

-const foos = [_]fn (var) bool{
+const foos = [_]fn (anytype) bool{
     foo1,
     foo2,
 };

-fn foo1(args: var) bool {
+fn foo1(args: anytype) bool {
     return true;
 }

-fn foo2(args: var) bool {
+fn foo2(args: anytype) bool {
     return false;
 }

@@ -78,6 +78,6 @@ test "pass zero length array to var args param" {
     doNothingWithFirstArg(.{""});
 }

-fn doNothingWithFirstArg(args: var) void {
+fn doNothingWithFirstArg(args: anytype) void {
     const a = args[0];
 }
diff --git a/test/stage1/behavior/vector.zig b/test/stage1/behavior/vector.zig
index 851074e0d1..8bdffcd500 100644
--- a/test/stage1/behavior/vector.zig
+++ b/test/stage1/behavior/vector.zig
@@ -171,7 +171,7 @@ test "load vector elements via comptime index" {
             expect(v[1] == 2);
             expect(loadv(&v[2]) == 3);
         }
-        fn loadv(ptr: var) i32 {
+        fn loadv(ptr: anytype) i32 {
             return ptr.*;
         }
     };
@@ -194,7 +194,7 @@ test "store vector elements via comptime index" {
             storev(&v[0], 100);
             expect(v[0] == 100);
         }
-        fn storev(ptr: var, x: i32) void {
+        fn storev(ptr: anytype, x: i32) void {
             ptr.* = x;
         }
     };
@@ -392,7 +392,7 @@ test "vector shift operators" {
     if (builtin.os.tag == .wasi) return error.SkipZigTest;

     const S = struct {
-        fn doTheTestShift(x: var, y: var) void {
+        fn doTheTestShift(x: anytype, y: anytype) void {
             const N = @typeInfo(@TypeOf(x)).Array.len;
             const TX = @typeInfo(@TypeOf(x)).Array.child;
             const TY = @typeInfo(@TypeOf(y)).Array.child;
@@ -409,7 +409,7 @@ test "vector shift operators" {
                 expectEqual(x[i] << y[i], v);
             }
         }
-        fn doTheTestShiftExact(x: var, y: var, dir: enum { Left, Right }) void {
+        fn doTheTestShiftExact(x: anytype, y: anytype, dir: enum { Left, Right }) void {
             const N = @typeInfo(@TypeOf(x)).Array.len;
             const TX = @typeInfo(@TypeOf(x)).Array.child;
             const TY = @typeInfo(@TypeOf(y)).Array.child;
-- 
cgit v1.2.3