Diffstat (limited to 'lib/std')
78 files changed, 1238 insertions, 1005 deletions
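Note: nearly every hunk below follows the same mechanical pattern: allocator parameters and fields change from a pointer type, `*Allocator` (also spelled `*mem.Allocator` / `*std.mem.Allocator`), to the interface value `Allocator`, and call sites stop taking the address of an `allocator` field (e.g. `&arena.allocator`) and instead call an `allocator()` method (e.g. `arena.allocator()`). The following is a minimal sketch of the resulting calling convention — not part of the commit, with illustrative names — assuming only the post-change `std.heap` and `std.ArrayList` APIs visible in these hunks:

    const std = @import("std");

    test "allocator interface after this change" {
        // Wrapper allocators hand out the interface through a method
        // rather than through a public `allocator` field.
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        const a = arena.allocator(); // previously: &arena.allocator

        // Managed containers now take the interface by value.
        var list = std.ArrayList(u8).init(a);
        defer list.deinit();
        try list.appendSlice("hello");

        // Thread-safe wrapping is now a method on FixedBufferAllocator,
        // replacing std.heap.ThreadSafeFixedBufferAllocator (see the
        // atomic queue/stack tests further down).
        var buf: [256]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buf[0..]);
        const thread_safe = fba.threadSafeAllocator();
        _ = try thread_safe.dupe(u8, list.items);
    }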
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 1eafe28be2..855c44c032 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -460,7 +460,7 @@ const WindowsThreadImpl = struct { errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes]; - const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable; + const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator().create(Instance) catch unreachable; instance.* = .{ .fn_args = args, .thread = .{ diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index e787abf1ef..7ebafc0a1b 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -79,7 +79,7 @@ pub fn ArrayHashMap( comptime std.hash_map.verifyContext(Context, K, K, u32); return struct { unmanaged: Unmanaged, - allocator: *Allocator, + allocator: Allocator, ctx: Context, /// The ArrayHashMapUnmanaged type using the same settings as this managed map. @@ -118,12 +118,12 @@ pub fn ArrayHashMap( const Self = @This(); /// Create an ArrayHashMap instance which will use a specified allocator. - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead."); return initContext(allocator, undefined); } - pub fn initContext(allocator: *Allocator, ctx: Context) Self { + pub fn initContext(allocator: Allocator, ctx: Context) Self { return .{ .unmanaged = .{}, .allocator = allocator, @@ -383,7 +383,7 @@ pub fn ArrayHashMap( /// Create a copy of the hash map which can be modified separately. /// The copy uses the same context as this instance, but the specified /// allocator. - pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self { + pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self { var other = try self.unmanaged.cloneContext(allocator, self.ctx); return other.promoteContext(allocator, self.ctx); } @@ -396,7 +396,7 @@ pub fn ArrayHashMap( } /// Create a copy of the hash map which can be modified separately. /// The copy uses the specified allocator and context. - pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) { + pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) { var other = try self.unmanaged.cloneContext(allocator, ctx); return other.promoteContext(allocator, ctx); } @@ -533,12 +533,12 @@ pub fn ArrayHashMapUnmanaged( /// Convert from an unmanaged map to a managed map. After calling this, /// the promoted map should no longer be used. - pub fn promote(self: Self, allocator: *Allocator) Managed { + pub fn promote(self: Self, allocator: Allocator) Managed { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead."); return self.promoteContext(allocator, undefined); } - pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed { + pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed { return .{ .unmanaged = self, .allocator = allocator, @@ -549,7 +549,7 @@ pub fn ArrayHashMapUnmanaged( /// Frees the backing allocation and leaves the map in an undefined state. /// Note that this does not free keys or values. 
You must take care of that /// before calling this function, if it is needed. - pub fn deinit(self: *Self, allocator: *Allocator) void { + pub fn deinit(self: *Self, allocator: Allocator) void { self.entries.deinit(allocator); if (self.index_header) |header| { header.free(allocator); @@ -570,7 +570,7 @@ pub fn ArrayHashMapUnmanaged( } /// Clears the map and releases the backing allocation - pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + pub fn clearAndFree(self: *Self, allocator: Allocator) void { self.entries.shrinkAndFree(allocator, 0); if (self.index_header) |header| { header.free(allocator); @@ -633,24 +633,24 @@ pub fn ArrayHashMapUnmanaged( /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointer points to it. Caller should then initialize /// the value (but not the key). - pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { + pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead."); return self.getOrPutContext(allocator, key, undefined); } - pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult { + pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult { const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!gop.found_existing) { gop.key_ptr.* = key; } return gop; } - pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { + pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead."); return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined); } - pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { + pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| { // "If key exists this function cannot fail." const index = self.getIndexAdapted(key, key_ctx) orelse return err; @@ -731,12 +731,12 @@ pub fn ArrayHashMapUnmanaged( } } - pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult { + pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } - pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult { + pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !GetOrPutResult { const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!res.found_existing) { res.key_ptr.* = key; @@ -749,12 +749,12 @@ pub fn ArrayHashMapUnmanaged( /// Increases capacity, guaranteeing that insertions up until the /// `expected_count` will not cause an allocation, and therefore cannot fail. 
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { + pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return self.ensureTotalCapacityContext(allocator, new_capacity, undefined); } - pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void { + pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_capacity: usize, ctx: Context) !void { if (new_capacity <= linear_scan_max) { try self.entries.ensureTotalCapacity(allocator, new_capacity); return; @@ -781,7 +781,7 @@ pub fn ArrayHashMapUnmanaged( /// therefore cannot fail. pub fn ensureUnusedCapacity( self: *Self, - allocator: *Allocator, + allocator: Allocator, additional_capacity: usize, ) !void { if (@sizeOf(ByIndexContext) != 0) @@ -790,7 +790,7 @@ pub fn ArrayHashMapUnmanaged( } pub fn ensureUnusedCapacityContext( self: *Self, - allocator: *Allocator, + allocator: Allocator, additional_capacity: usize, ctx: Context, ) !void { @@ -808,24 +808,24 @@ pub fn ArrayHashMapUnmanaged( /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPut`. - pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { + pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } - pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { + pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); result.value_ptr.* = value; } /// Inserts a key-value pair into the hash map, asserting that no previous /// entry with the same key is already present - pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { + pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } - pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { + pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); assert(!result.found_existing); result.value_ptr.* = value; @@ -859,12 +859,12 @@ pub fn ArrayHashMapUnmanaged( } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. 
- pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { + pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } - pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { + pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV { const gop = try self.getOrPutContext(allocator, key, ctx); var result: ?KV = null; if (gop.found_existing) { @@ -1132,12 +1132,12 @@ pub fn ArrayHashMapUnmanaged( /// Create a copy of the hash map which can be modified separately. /// The copy uses the same context and allocator as this instance. - pub fn clone(self: Self, allocator: *Allocator) !Self { + pub fn clone(self: Self, allocator: Allocator) !Self { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, undefined); } - pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self { + pub fn cloneContext(self: Self, allocator: Allocator, ctx: Context) !Self { var other: Self = .{}; other.entries = try self.entries.clone(allocator); errdefer other.entries.deinit(allocator); @@ -1152,12 +1152,12 @@ pub fn ArrayHashMapUnmanaged( /// Rebuilds the key indexes. If the underlying entries has been modified directly, users /// can call `reIndex` to update the indexes to account for these new entries. - pub fn reIndex(self: *Self, allocator: *Allocator) !void { + pub fn reIndex(self: *Self, allocator: Allocator) !void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead."); return self.reIndexContext(allocator, undefined); } - pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void { + pub fn reIndexContext(self: *Self, allocator: Allocator, ctx: Context) !void { if (self.entries.capacity <= linear_scan_max) return; // We're going to rebuild the index header and replace the existing one (if any). The // indexes should sized such that they will be at most 60% full. @@ -1189,12 +1189,12 @@ pub fn ArrayHashMapUnmanaged( /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated /// index entries. Reduces allocated capacity. - pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void { + pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead."); return self.shrinkAndFreeContext(allocator, new_len, undefined); } - pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void { + pub fn shrinkAndFreeContext(self: *Self, allocator: Allocator, new_len: usize, ctx: Context) void { // Remove index entries from the new length onwards. // Explicitly choose to ONLY remove index entries and not the underlying array list // entries as we're going to remove them in the subsequent shrink call. @@ -1844,7 +1844,7 @@ const IndexHeader = struct { /// Allocates an index header, and fills the entryIndexes array with empty. /// The distance array contents are undefined. 
- fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader { + fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader { const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index); const index_size = hash_map.capacityIndexSize(new_bit_index); const nbytes = @sizeOf(IndexHeader) + index_size * len; @@ -1858,7 +1858,7 @@ const IndexHeader = struct { } /// Releases the memory for a header and its associated arrays. - fn free(header: *IndexHeader, allocator: *Allocator) void { + fn free(header: *IndexHeader, allocator: Allocator) void { const index_size = hash_map.capacityIndexSize(header.bit_index); const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header); const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size]; diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 24049dad5c..fe98caf25f 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -42,12 +42,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// How many T values this list can hold without allocating /// additional memory. capacity: usize, - allocator: *Allocator, + allocator: Allocator, pub const Slice = if (alignment) |a| ([]align(a) T) else []T; /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return Self{ .items = &[_]T{}, .capacity = 0, @@ -58,7 +58,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Initialize with capacity to hold at least `num` elements. /// The resulting capacity is likely to be equal to `num`. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn initCapacity(allocator: *Allocator, num: usize) !Self { + pub fn initCapacity(allocator: Allocator, num: usize) !Self { var self = Self.init(allocator); try self.ensureTotalCapacityPrecise(num); return self; @@ -74,7 +74,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// ArrayList takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self { + pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self { return Self{ .items = slice, .capacity = slice.len, @@ -457,33 +457,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Initialize with capacity to hold at least num elements. /// The resulting capacity is likely to be equal to `num`. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn initCapacity(allocator: *Allocator, num: usize) !Self { + pub fn initCapacity(allocator: Allocator, num: usize) !Self { var self = Self{}; try self.ensureTotalCapacityPrecise(allocator, num); return self; } /// Release all allocated memory. - pub fn deinit(self: *Self, allocator: *Allocator) void { + pub fn deinit(self: *Self, allocator: Allocator) void { allocator.free(self.allocatedSlice()); self.* = undefined; } /// Convert this list into an analogous memory-managed one. /// The returned list has ownership of the underlying memory. - pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) { + pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) { return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator }; } /// The caller owns the returned memory. ArrayList becomes empty. 
- pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice { + pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice { const result = allocator.shrink(self.allocatedSlice(), self.items.len); self.* = Self{}; return result; } /// The caller owns the returned memory. ArrayList becomes empty. - pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T { + pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) ![:sentinel]T { try self.append(allocator, sentinel); const result = self.toOwnedSlice(allocator); return result[0 .. result.len - 1 :sentinel]; @@ -492,7 +492,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Insert `item` at index `n`. Moves `list[n .. list.len]` /// to higher indices to make room. /// This operation is O(N). - pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void { + pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) !void { try self.ensureUnusedCapacity(allocator, 1); self.items.len += 1; @@ -503,7 +503,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to /// higher indicices make room. /// This operation is O(N). - pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void { + pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) !void { try self.ensureUnusedCapacity(allocator, items.len); self.items.len += items.len; @@ -515,14 +515,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Grows list if `len < new_items.len`. /// Shrinks list if `len > new_items.len` /// Invalidates pointers if this ArrayList is resized. - pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void { + pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) !void { var managed = self.toManaged(allocator); try managed.replaceRange(start, len, new_items); self.* = managed.moveToUnmanaged(); } /// Extend the list by 1 element. Allocates more memory as necessary. - pub fn append(self: *Self, allocator: *Allocator, item: T) !void { + pub fn append(self: *Self, allocator: Allocator, item: T) !void { const new_item_ptr = try self.addOne(allocator); new_item_ptr.* = item; } @@ -563,7 +563,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Append the slice of items to the list. Allocates more /// memory as necessary. - pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void { + pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) !void { try self.ensureUnusedCapacity(allocator, items.len); self.appendSliceAssumeCapacity(items); } @@ -580,7 +580,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ pub const WriterContext = struct { self: *Self, - allocator: *Allocator, + allocator: Allocator, }; pub const Writer = if (T != u8) @@ -590,7 +590,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite); /// Initializes a Writer which will append to the list. 
- pub fn writer(self: *Self, allocator: *Allocator) Writer { + pub fn writer(self: *Self, allocator: Allocator) Writer { return .{ .context = .{ .self = self, .allocator = allocator } }; } @@ -603,7 +603,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Append a value to the list `n` times. /// Allocates more memory as necessary. - pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void { + pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) !void { const old_len = self.items.len; try self.resize(allocator, self.items.len + n); mem.set(T, self.items[old_len..self.items.len], value); @@ -621,13 +621,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Adjust the list's length to `new_len`. /// Does not initialize added items, if any. - pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void { + pub fn resize(self: *Self, allocator: Allocator, new_len: usize) !void { try self.ensureTotalCapacity(allocator, new_len); self.items.len = new_len; } /// Reduce allocated capacity to `new_len`. - pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void { + pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void { assert(new_len <= self.items.len); self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) { @@ -653,7 +653,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ } /// Invalidates all element pointers. - pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + pub fn clearAndFree(self: *Self, allocator: Allocator) void { allocator.free(self.allocatedSlice()); self.items.len = 0; self.capacity = 0; @@ -663,7 +663,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Modify the array so that it can hold at least `new_capacity` items. /// Invalidates pointers if additional memory is needed. - pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { + pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; @@ -679,7 +679,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Like `ensureTotalCapacity`, but the resulting capacity is much more likely /// (but not guaranteed) to be equal to `new_capacity`. /// Invalidates pointers if additional memory is needed. - pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void { + pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) !void { if (self.capacity >= new_capacity) return; const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity); @@ -691,7 +691,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Invalidates pointers if additional memory is needed. pub fn ensureUnusedCapacity( self: *Self, - allocator: *Allocator, + allocator: Allocator, additional_count: usize, ) !void { return self.ensureTotalCapacity(allocator, self.items.len + additional_count); @@ -706,7 +706,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Increase length by 1, returning pointer to the new item. /// The returned pointer becomes invalid when the list resized. 
- pub fn addOne(self: *Self, allocator: *Allocator) !*T { + pub fn addOne(self: *Self, allocator: Allocator) !*T { const newlen = self.items.len + 1; try self.ensureTotalCapacity(allocator, newlen); return self.addOneAssumeCapacity(); @@ -726,7 +726,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Resize the array, adding `n` new elements, which have `undefined` values. /// The return value is an array pointing to the newly allocated elements. /// The returned pointer becomes invalid when the list is resized. - pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T { + pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) !*[n]T { const prev_len = self.items.len; try self.resize(allocator, self.items.len + n); return self.items[prev_len..][0..n]; @@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" { test "std.ArrayList/ArrayListUnmanaged.replaceRange" { var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const a = &arena.allocator; + const a = arena.allocator(); const init = [_]i32{ 1, 2, 3, 4, 5 }; const new = [_]i32{ 0, 0, 0 }; @@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe // use an arena allocator to make sure realloc returns error.OutOfMemory var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const a = &arena.allocator; + const a = arena.allocator(); { var list = ArrayList(i32).init(a); @@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" { test "std.ArrayList(u0)" { // An ArrayList on zero-sized types should not need to allocate - const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator; + const a = testing.FailingAllocator.init(testing.allocator, 0).allocator(); var list = ArrayList(u0).init(a); defer list.deinit(); diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index c999162b36..8174361800 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -301,7 +301,7 @@ test "lowerString" { /// Allocates a lower case copy of `ascii_string`. /// Caller owns returned string and must free with `allocator`. -pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 { +pub fn allocLowerString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 { const result = try allocator.alloc(u8, ascii_string.len); return lowerString(result, ascii_string); } @@ -330,7 +330,7 @@ test "upperString" { /// Allocates an upper case copy of `ascii_string`. /// Caller owns returned string and must free with `allocator`. 
-pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 { +pub fn allocUpperString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 { const result = try allocator.alloc(u8, ascii_string.len); return upperString(result, ascii_string); } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index 62de8d9f10..6c502ef37e 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -156,7 +156,7 @@ pub fn Queue(comptime T: type) type { } const Context = struct { - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, queue: *Queue(i32), put_sum: isize, get_sum: isize, @@ -176,8 +176,8 @@ test "std.atomic.Queue" { var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024); defer std.heap.page_allocator.free(plenty_of_memory); - var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); - var a = &fixed_buffer_allocator.allocator; + var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory); + var a = fixed_buffer_allocator.threadSafeAllocator(); var queue = Queue(i32).init(); var context = Context{ diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig index 35f6914252..a6396bb22b 100644 --- a/lib/std/atomic/stack.zig +++ b/lib/std/atomic/stack.zig @@ -69,7 +69,7 @@ pub fn Stack(comptime T: type) type { } const Context = struct { - allocator: *std.mem.Allocator, + allocator: std.mem.Allocator, stack: *Stack(i32), put_sum: isize, get_sum: isize, @@ -88,8 +88,8 @@ test "std.atomic.stack" { var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024); defer std.heap.page_allocator.free(plenty_of_memory); - var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); - var a = &fixed_buffer_allocator.allocator; + var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory); + var a = fixed_buffer_allocator.threadSafeAllocator(); var stack = Stack(i32).init(); var context = Context{ diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig index af960784f7..2848305819 100644 --- a/lib/std/bit_set.zig +++ b/lib/std/bit_set.zig @@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct { /// Creates a bit set with no elements present. /// If bit_length is not zero, deinit must eventually be called. - pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self { + pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self { var self = Self{}; try self.resize(bit_length, false, allocator); return self; @@ -484,7 +484,7 @@ pub const DynamicBitSetUnmanaged = struct { /// Creates a bit set with all elements present. /// If bit_length is not zero, deinit must eventually be called. - pub fn initFull(bit_length: usize, allocator: *Allocator) !Self { + pub fn initFull(bit_length: usize, allocator: Allocator) !Self { var self = Self{}; try self.resize(bit_length, true, allocator); return self; @@ -493,7 +493,7 @@ pub const DynamicBitSetUnmanaged = struct { /// Resizes to a new bit_length. If the new length is larger /// than the old length, fills any added bits with `fill`. /// If new_len is not zero, deinit must eventually be called. 
- pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void { + pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void { const old_len = self.bit_length; const old_masks = numMasks(old_len); @@ -556,12 +556,12 @@ pub const DynamicBitSetUnmanaged = struct { /// deinitializes the array and releases its memory. /// The passed allocator must be the same one used for /// init* or resize in the past. - pub fn deinit(self: *Self, allocator: *Allocator) void { + pub fn deinit(self: *Self, allocator: Allocator) void { self.resize(0, false, allocator) catch unreachable; } /// Creates a duplicate of this bit set, using the new allocator. - pub fn clone(self: *const Self, new_allocator: *Allocator) !Self { + pub fn clone(self: *const Self, new_allocator: Allocator) !Self { const num_masks = numMasks(self.bit_length); var copy = Self{}; try copy.resize(self.bit_length, false, new_allocator); @@ -742,13 +742,13 @@ pub const DynamicBitSet = struct { pub const ShiftInt = std.math.Log2Int(MaskInt); /// The allocator used by this bit set - allocator: *Allocator, + allocator: Allocator, /// The number of valid items in this bit set unmanaged: DynamicBitSetUnmanaged = .{}, /// Creates a bit set with no elements present. - pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self { + pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator), .allocator = allocator, @@ -756,7 +756,7 @@ pub const DynamicBitSet = struct { } /// Creates a bit set with all elements present. - pub fn initFull(bit_length: usize, allocator: *Allocator) !Self { + pub fn initFull(bit_length: usize, allocator: Allocator) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator), .allocator = allocator, @@ -777,7 +777,7 @@ pub const DynamicBitSet = struct { } /// Creates a duplicate of this bit set, using the new allocator. - pub fn clone(self: *const Self, new_allocator: *Allocator) !Self { + pub fn clone(self: *const Self, new_allocator: Allocator) !Self { return Self{ .unmanaged = try self.unmanaged.clone(new_allocator), .allocator = new_allocator, diff --git a/lib/std/buf_map.zig b/lib/std/buf_map.zig index 1e4462e6ae..5b26ae9684 100644 --- a/lib/std/buf_map.zig +++ b/lib/std/buf_map.zig @@ -14,7 +14,7 @@ pub const BufMap = struct { /// Create a BufMap backed by a specific allocator. /// That allocator will be used for both backing allocations /// and string deduplication. - pub fn init(allocator: *Allocator) BufMap { + pub fn init(allocator: Allocator) BufMap { var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) }; return self; } diff --git a/lib/std/buf_set.zig b/lib/std/buf_set.zig index ce2d51b056..e68b24fbcc 100644 --- a/lib/std/buf_set.zig +++ b/lib/std/buf_set.zig @@ -16,7 +16,7 @@ pub const BufSet = struct { /// Create a BufSet using an allocator. The allocator will /// be used internally for both backing allocations and /// string duplication. 
- pub fn init(a: *Allocator) BufSet { + pub fn init(a: Allocator) BufSet { var self = BufSet{ .hash_map = BufSetHashMap.init(a) }; return self; } @@ -67,7 +67,7 @@ pub const BufSet = struct { } /// Get the allocator used by this set - pub fn allocator(self: *const BufSet) *Allocator { + pub fn allocator(self: *const BufSet) Allocator { return self.hash_map.allocator; } diff --git a/lib/std/build.zig b/lib/std/build.zig index 43a0a83dff..ea6a0e05f1 100644 --- a/lib/std/build.zig +++ b/lib/std/build.zig @@ -28,7 +28,7 @@ pub const OptionsStep = @import("build/OptionsStep.zig"); pub const Builder = struct { install_tls: TopLevelStep, uninstall_tls: TopLevelStep, - allocator: *Allocator, + allocator: Allocator, user_input_options: UserInputOptionsMap, available_options_map: AvailableOptionsMap, available_options_list: ArrayList(AvailableOption), @@ -134,7 +134,7 @@ pub const Builder = struct { }; pub fn create( - allocator: *Allocator, + allocator: Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8, @@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" { defer arena.deinit(); const builder = try Builder.create( - &arena.allocator, + arena.allocator(), "zig", "zig-cache", "zig-cache", @@ -3080,7 +3080,7 @@ pub const Step = struct { custom, }; - pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step { + pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step { return Step{ .id = id, .name = allocator.dupe(u8, name) catch unreachable, @@ -3090,7 +3090,7 @@ pub const Step = struct { .done_flag = false, }; } - pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step { + pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step { return init(id, name, allocator, makeNoOp); } @@ -3117,7 +3117,7 @@ pub const Step = struct { } }; -fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void { +fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void { const out_dir = fs.path.dirname(output_path) orelse "."; const out_basename = fs.path.basename(output_path); // sym link for libfoo.so.1 to libfoo.so.1.2.3 @@ -3141,7 +3141,7 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj } /// Returned slice must be freed by the caller. 
-fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 { +fn findVcpkgRoot(allocator: Allocator) !?[]const u8 { const appdata_path = try fs.getAppDataDir(allocator, "vcpkg"); defer allocator.free(appdata_path); @@ -3210,7 +3210,7 @@ test "Builder.dupePkg()" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( - &arena.allocator, + arena.allocator(), "test", "test", "test", @@ -3255,7 +3255,7 @@ test "LibExeObjStep.addPackage" { defer arena.deinit(); var builder = try Builder.create( - &arena.allocator, + arena.allocator(), "test", "test", "test", diff --git a/lib/std/build/InstallRawStep.zig b/lib/std/build/InstallRawStep.zig index d87ff2fffd..0f921d6622 100644 --- a/lib/std/build/InstallRawStep.zig +++ b/lib/std/build/InstallRawStep.zig @@ -40,7 +40,7 @@ const BinaryElfOutput = struct { self.segments.deinit(); } - pub fn parse(allocator: *Allocator, elf_file: File) !Self { + pub fn parse(allocator: Allocator, elf_file: File) !Self { var self: Self = .{ .segments = ArrayList(*BinaryElfSegment).init(allocator), .sections = ArrayList(*BinaryElfSection).init(allocator), @@ -298,7 +298,7 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool { return true; } -fn emitRaw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void { +fn emitRaw(allocator: Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void { var elf_file = try fs.cwd().openFile(elf_path, .{}); defer elf_file.close(); diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig index dfe512adec..eae5983845 100644 --- a/lib/std/build/OptionsStep.zig +++ b/lib/std/build/OptionsStep.zig @@ -274,7 +274,7 @@ test "OptionsStep" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( - &arena.allocator, + arena.allocator(), "test", "test", "test", @@ -350,5 +350,5 @@ test "OptionsStep" { \\ , options.contents.items); - _ = try std.zig.parse(&arena.allocator, try options.contents.toOwnedSliceSentinel(0)); + _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0)); } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index e7cf56f39d..9ce8c1c38e 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -75,7 +75,7 @@ pub const StackTrace = struct { }; const tty_config = std.debug.detectTTYConfig(); try writer.writeAll("\n"); - std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| { + std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| { try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)}); }; try writer.writeAll("\n"); diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 2e1dfad00a..13e14c7f34 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -23,7 +23,7 @@ pub const ChildProcess = struct { handle: if (builtin.os.tag == .windows) windows.HANDLE else void, thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void, - allocator: *mem.Allocator, + allocator: mem.Allocator, stdin: ?File, stdout: ?File, @@ -90,7 +90,7 @@ pub const ChildProcess = struct { /// First argument in argv is the executable. /// On success must call deinit. 
- pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess { + pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess { const child = try allocator.create(ChildProcess); child.* = ChildProcess{ .allocator = allocator, @@ -329,7 +329,7 @@ pub const ChildProcess = struct { /// Spawns a child process, waits for it, collecting stdout and stderr, and then returns. /// If it succeeds, the caller owns result.stdout and result.stderr memory. pub fn exec(args: struct { - allocator: *mem.Allocator, + allocator: mem.Allocator, argv: []const []const u8, cwd: ?[]const u8 = null, cwd_dir: ?fs.Dir = null, @@ -541,7 +541,7 @@ pub const ChildProcess = struct { var arena_allocator = std.heap.ArenaAllocator.init(self.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.allocator(); // The POSIX standard does not allow malloc() between fork() and execve(), // and `self.allocator` may be a libc allocator. @@ -931,7 +931,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1 } /// Caller must dealloc. -fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 { +fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); @@ -1081,7 +1081,7 @@ fn readIntFd(fd: i32) !ErrInt { } /// Caller must free result. -pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 { +pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 { // count bytes needed const max_chars_needed = x: { var max_chars_needed: usize = 4; // 4 for the final 4 null bytes @@ -1117,7 +1117,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) return allocator.shrink(result, i); } -pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 { +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 { const envp_count = env_map.count(); const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); { @@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" { var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); - const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap); + const environ = try createNullDelimitedEnvMap(arena.allocator(), &envmap); try testing.expectEqual(@as(usize, 5), environ.len); diff --git a/lib/std/coff.zig b/lib/std/coff.zig index 961cd8ade6..2bf0b1c44e 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -98,7 +98,7 @@ pub const CoffError = error{ // Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format pub const Coff = struct { in_file: File, - allocator: *mem.Allocator, + allocator: mem.Allocator, coff_header: CoffHeader, pe_header: OptionalHeader, @@ -107,7 +107,7 @@ pub const Coff = struct { guid: [16]u8, age: u32, - pub fn init(allocator: *mem.Allocator, in_file: File) Coff { + pub fn init(allocator: mem.Allocator, in_file: File) Coff { return Coff{ .in_file = in_file, .allocator = allocator, @@ -324,7 +324,7 @@ pub const Coff = struct { } // Return an owned slice full of the section data - pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 { + pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 { const sec = 
for (self.sections.items) |*sec| { if (mem.eql(u8, sec.header.name[0..name.len], name)) { break sec; diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig index 497b07d905..491b888812 100644 --- a/lib/std/compress/gzip.zig +++ b/lib/std/compress/gzip.zig @@ -24,7 +24,7 @@ pub fn GzipStream(comptime ReaderType: type) type { error{ CorruptedData, WrongChecksum }; pub const Reader = io.Reader(*Self, Error, read); - allocator: *mem.Allocator, + allocator: mem.Allocator, inflater: deflate.InflateStream(ReaderType), in_reader: ReaderType, hasher: std.hash.Crc32, @@ -37,7 +37,7 @@ pub fn GzipStream(comptime ReaderType: type) type { modification_time: u32, }, - fn init(allocator: *mem.Allocator, source: ReaderType) !Self { + fn init(allocator: mem.Allocator, source: ReaderType) !Self { // gzip header format is specified in RFC1952 const header = try source.readBytesNoEof(10); @@ -152,7 +152,7 @@ pub fn GzipStream(comptime ReaderType: type) type { }; } -pub fn gzipStream(allocator: *mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) { +pub fn gzipStream(allocator: mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) { return GzipStream(@TypeOf(reader)).init(allocator, reader); } diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig index f0f4ca2ff4..09d9c18e72 100644 --- a/lib/std/compress/zlib.zig +++ b/lib/std/compress/zlib.zig @@ -17,13 +17,13 @@ pub fn ZlibStream(comptime ReaderType: type) type { error{ WrongChecksum, Unsupported }; pub const Reader = io.Reader(*Self, Error, read); - allocator: *mem.Allocator, + allocator: mem.Allocator, inflater: deflate.InflateStream(ReaderType), in_reader: ReaderType, hasher: std.hash.Adler32, window_slice: []u8, - fn init(allocator: *mem.Allocator, source: ReaderType) !Self { + fn init(allocator: mem.Allocator, source: ReaderType) !Self { // Zlib header format is specified in RFC1950 const header = try source.readBytesNoEof(2); @@ -88,7 +88,7 @@ pub fn ZlibStream(comptime ReaderType: type) type { }; } -pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) { +pub fn zlibStream(allocator: mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) { return ZlibStream(@TypeOf(reader)).init(allocator, reader); } diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig index 66cd8b38f1..493f36ca94 100644 --- a/lib/std/crypto/argon2.zig +++ b/lib/std/crypto/argon2.zig @@ -201,7 +201,7 @@ fn initBlocks( } fn processBlocks( - allocator: *mem.Allocator, + allocator: mem.Allocator, blocks: *Blocks, time: u32, memory: u32, @@ -240,7 +240,7 @@ fn processBlocksSt( } fn processBlocksMt( - allocator: *mem.Allocator, + allocator: mem.Allocator, blocks: *Blocks, time: u32, memory: u32, @@ -480,7 +480,7 @@ fn indexAlpha( /// /// Salt has to be at least 8 bytes length. pub fn kdf( - allocator: *mem.Allocator, + allocator: mem.Allocator, derived_key: []u8, password: []const u8, salt: []const u8, @@ -524,7 +524,7 @@ const PhcFormatHasher = struct { }; pub fn create( - allocator: *mem.Allocator, + allocator: mem.Allocator, password: []const u8, params: Params, mode: Mode, @@ -550,7 +550,7 @@ const PhcFormatHasher = struct { } pub fn verify( - allocator: *mem.Allocator, + allocator: mem.Allocator, str: []const u8, password: []const u8, ) HasherError!void { @@ -579,7 +579,7 @@ const PhcFormatHasher = struct { /// /// Only phc encoding is supported. 
pub const HashOptions = struct { - allocator: ?*mem.Allocator, + allocator: ?mem.Allocator, params: Params, mode: Mode = .argon2id, encoding: pwhash.Encoding = .phc, @@ -609,7 +609,7 @@ pub fn strHash( /// /// Allocator is required for argon2. pub const VerifyOptions = struct { - allocator: ?*mem.Allocator, + allocator: ?mem.Allocator, }; /// Verify that a previously computed hash is valid for a given password. diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig index d8c4d67453..bd3c9ca7d4 100644 --- a/lib/std/crypto/bcrypt.zig +++ b/lib/std/crypto/bcrypt.zig @@ -368,7 +368,7 @@ const CryptFormatHasher = struct { /// Options for hashing a password. pub const HashOptions = struct { - allocator: ?*mem.Allocator = null, + allocator: ?mem.Allocator = null, params: Params, encoding: pwhash.Encoding, }; @@ -394,7 +394,7 @@ pub fn strHash( /// Options for hash verification. pub const VerifyOptions = struct { - allocator: ?*mem.Allocator = null, + allocator: ?mem.Allocator = null, }; /// Verify that a previously computed hash is valid for a given password. diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index 4836de032e..9fd3c42544 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -363,7 +363,7 @@ pub fn main() !void { var buffer: [1024]u8 = undefined; var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]); - const args = try std.process.argsAlloc(&fixed.allocator); + const args = try std.process.argsAlloc(fixed.allocator()); var filter: ?[]u8 = ""; diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index b17952dcd6..e464cca28e 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -161,7 +161,7 @@ pub const Params = struct { /// /// scrypt is defined in RFC 7914. /// -/// allocator: *mem.Allocator. +/// allocator: mem.Allocator. /// /// derived_key: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length. /// May be uninitialized. All bytes will be overwritten. @@ -173,7 +173,7 @@ pub const Params = struct { /// /// params: Params. pub fn kdf( - allocator: *mem.Allocator, + allocator: mem.Allocator, derived_key: []u8, password: []const u8, salt: []const u8, @@ -406,7 +406,7 @@ const PhcFormatHasher = struct { /// Return a non-deterministic hash of the password encoded as a PHC-format string pub fn create( - allocator: *mem.Allocator, + allocator: mem.Allocator, password: []const u8, params: Params, buf: []u8, @@ -429,7 +429,7 @@ const PhcFormatHasher = struct { /// Verify a password against a PHC-format encoded string pub fn verify( - allocator: *mem.Allocator, + allocator: mem.Allocator, str: []const u8, password: []const u8, ) HasherError!void { @@ -455,7 +455,7 @@ const CryptFormatHasher = struct { /// Return a non-deterministic hash of the password encoded into the modular crypt format pub fn create( - allocator: *mem.Allocator, + allocator: mem.Allocator, password: []const u8, params: Params, buf: []u8, @@ -478,7 +478,7 @@ const CryptFormatHasher = struct { /// Verify a password against a string in modular crypt format pub fn verify( - allocator: *mem.Allocator, + allocator: mem.Allocator, str: []const u8, password: []const u8, ) HasherError!void { @@ -497,7 +497,7 @@ const CryptFormatHasher = struct { /// /// Allocator is required for scrypt. pub const HashOptions = struct { - allocator: ?*mem.Allocator, + allocator: ?mem.Allocator, params: Params, encoding: pwhash.Encoding, }; @@ -520,7 +520,7 @@ pub fn strHash( /// /// Allocator is required for scrypt. 
pub const VerifyOptions = struct { - allocator: ?*mem.Allocator, + allocator: ?mem.Allocator, }; /// Verify that a previously computed hash is valid for a given password. diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig index 64beb378d9..068fc419ac 100644 --- a/lib/std/cstr.zig +++ b/lib/std/cstr.zig @@ -33,7 +33,7 @@ fn testCStrFnsImpl() !void { /// Returns a mutable, null-terminated slice with the same length as `slice`. /// Caller owns the returned memory. -pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 { +pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![:0]u8 { const result = try allocator.alloc(u8, slice.len + 1); mem.copy(u8, result, slice); result[slice.len] = 0; @@ -48,13 +48,13 @@ test "addNullByte" { } pub const NullTerminated2DArray = struct { - allocator: *mem.Allocator, + allocator: mem.Allocator, byte_count: usize, ptr: ?[*:null]?[*:0]u8, /// Takes N lists of strings, concatenates the lists together, and adds a null terminator /// Caller must deinit result - pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray { + pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray { var new_len: usize = 1; // 1 for the list null var byte_count: usize = 0; for (slices) |slice| { diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 531872581a..64db6eeadc 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -29,7 +29,7 @@ pub const LineInfo = struct { line: u64, column: u64, file_name: []const u8, - allocator: ?*mem.Allocator, + allocator: ?mem.Allocator, pub fn deinit(self: LineInfo) void { const allocator = self.allocator orelse return; @@ -339,7 +339,7 @@ const RESET = "\x1b[0m"; pub fn writeStackTrace( stack_trace: std.builtin.StackTrace, out_stream: anytype, - allocator: *mem.Allocator, + allocator: mem.Allocator, debug_info: *DebugInfo, tty_config: TTY.Config, ) !void { @@ -662,7 +662,7 @@ pub const OpenSelfDebugInfoError = error{ }; /// TODO resources https://github.com/ziglang/zig/issues/4353 -pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo { +pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo { nosuspend { if (builtin.strip_debug_info) return error.MissingDebugInfo; @@ -688,7 +688,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo { /// it themselves, even on error. /// TODO resources https://github.com/ziglang/zig/issues/4353 /// TODO it's weird to take ownership even on error, rework this code. -fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo { +fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo { nosuspend { errdefer coff_file.close(); @@ -755,7 +755,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 { /// it themselves, even on error. /// TODO resources https://github.com/ziglang/zig/issues/4353 /// TODO it's weird to take ownership even on error, rework this code. -pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo { +pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo { nosuspend { const mapped_mem = try mapWholeFile(elf_file); const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]); @@ -827,7 +827,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI /// This takes ownership of macho_file: users of this function should not close /// it themselves, even on error. 
/// TODO it's weird to take ownership even on error, rework this code. -fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo { +fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo { const mapped_mem = try mapWholeFile(macho_file); const hdr = @ptrCast( @@ -1025,10 +1025,10 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 { } pub const DebugInfo = struct { - allocator: *mem.Allocator, + allocator: mem.Allocator, address_map: std.AutoHashMap(usize, *ModuleDebugInfo), - pub fn init(allocator: *mem.Allocator) DebugInfo { + pub fn init(allocator: mem.Allocator) DebugInfo { return DebugInfo{ .allocator = allocator, .address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator), @@ -1278,7 +1278,7 @@ pub const ModuleDebugInfo = switch (native_os) { addr_table: std.StringHashMap(u64), }; - pub fn allocator(self: @This()) *mem.Allocator { + pub fn allocator(self: @This()) mem.Allocator { return self.ofiles.allocator; } @@ -1470,7 +1470,7 @@ pub const ModuleDebugInfo = switch (native_os) { debug_data: PdbOrDwarf, coff: *coff.Coff, - pub fn allocator(self: @This()) *mem.Allocator { + pub fn allocator(self: @This()) mem.Allocator { return self.coff.allocator; } @@ -1560,14 +1560,15 @@ fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo { } /// TODO multithreaded awareness -var debug_info_allocator: ?*mem.Allocator = null; +var debug_info_allocator: ?mem.Allocator = null; var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined; -fn getDebugInfoAllocator() *mem.Allocator { +fn getDebugInfoAllocator() mem.Allocator { if (debug_info_allocator) |a| return a; debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); - debug_info_allocator = &debug_info_arena_allocator.allocator; - return &debug_info_arena_allocator.allocator; + const allocator = debug_info_arena_allocator.allocator(); + debug_info_allocator = allocator; + return allocator; } /// Whether or not the current target can print useful debug information when a segfault occurs. 
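For reference, the lazy-initialization pattern that `getDebugInfoAllocator` switches to in the debug.zig hunk above can be condensed as follows. This is a paraphrase with hypothetical names, not code from the commit, and assumes only the post-change `ArenaAllocator.allocator()` method seen in these hunks:

    const std = @import("std");

    var cached_allocator: ?std.mem.Allocator = null;
    var arena_state: std.heap.ArenaAllocator = undefined;

    fn lazyAllocator() std.mem.Allocator {
        if (cached_allocator) |a| return a;
        // The interface value is created once from the arena and cached;
        // taking the address of an `allocator` field no longer applies.
        arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        const a = arena_state.allocator();
        cached_allocator = a;
        return a;
    }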
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index 26031be662..eb204d15ee 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -466,7 +466,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool) } // TODO the nosuspends here are workarounds -fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 { +fn readAllocBytes(allocator: mem.Allocator, in_stream: anytype, size: usize) ![]u8 { const buf = try allocator.alloc(u8, size); errdefer allocator.free(buf); if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile; @@ -481,18 +481,18 @@ fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64 @as(u64, try in_stream.readInt(u32, endian)); } -fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue { +fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: anytype, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Block = buf }; } // TODO the nosuspends here are workarounds -fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue { +fn parseFormValueBlock(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue { const block_len = try nosuspend in_stream.readVarInt(usize, endian, size); return parseFormValueBlockLen(allocator, in_stream, block_len); } -fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue { +fn parseFormValueConstant(allocator: mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue { _ = allocator; // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here. // `nosuspend` should be removed from all the function calls once it is fixed. @@ -520,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: } // TODO the nosuspends here are workarounds -fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue { +fn parseFormValueRef(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue { _ = allocator; return FormValue{ .Ref = switch (size) { @@ -535,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std. } // TODO the nosuspends here are workarounds -fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue { +fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue { return switch (form_id) { FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) }, FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1), @@ -604,7 +604,7 @@ pub const DwarfInfo = struct { compile_unit_list: ArrayList(CompileUnit) = undefined, func_list: ArrayList(Func) = undefined, - pub fn allocator(self: DwarfInfo) *mem.Allocator { + pub fn allocator(self: DwarfInfo) mem.Allocator { return self.abbrev_table_list.allocator; } @@ -1092,7 +1092,7 @@ pub const DwarfInfo = struct { /// the DwarfInfo fields before calling. 
These fields can be left undefined: /// * abbrev_table_list /// * compile_unit_list -pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void { +pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void { di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator); di.compile_unit_list = ArrayList(CompileUnit).init(allocator); di.func_list = ArrayList(Func).init(allocator); diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig index 599e8d9496..85eeeaf2b7 100644 --- a/lib/std/event/group.zig +++ b/lib/std/event/group.zig @@ -15,7 +15,7 @@ pub fn Group(comptime ReturnType: type) type { frame_stack: Stack, alloc_stack: AllocStack, lock: Lock, - allocator: *Allocator, + allocator: Allocator, const Self = @This(); @@ -31,7 +31,7 @@ pub fn Group(comptime ReturnType: type) type { handle: anyframe->ReturnType, }; - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return Self{ .frame_stack = Stack.init(), .alloc_stack = AllocStack.init(), @@ -127,7 +127,7 @@ test "std.event.Group" { _ = async testGroup(std.heap.page_allocator); } -fn testGroup(allocator: *Allocator) callconv(.Async) void { +fn testGroup(allocator: Allocator) callconv(.Async) void { var count: usize = 0; var group = Group(void).init(allocator); var sleep_a_little_frame = async sleepALittle(&count); diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index 042c8bc3cc..23c89aabc5 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -173,12 +173,12 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; const resume_node_count = std.math.max(extra_thread_count, 1); - self.eventfd_resume_nodes = try self.arena.allocator.alloc( + self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, ); - self.extra_threads = try self.arena.allocator.alloc(Thread, extra_thread_count); + self.extra_threads = try self.arena.allocator().alloc(Thread, extra_thread_count); try self.initOsData(extra_thread_count); errdefer self.deinitOsData(); @@ -727,7 +727,7 @@ pub const Loop = struct { /// with `allocator` and freed when the function returns. /// `func` must return void and it can be an async function. /// Yields to the event loop, running the function on the next tick. 
- pub fn runDetached(self: *Loop, alloc: *mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void { + pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void { if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!"); if (@TypeOf(@call(.{}, func, args)) != void) { @compileError("`func` must not have a return value"); @@ -735,7 +735,7 @@ pub const Loop = struct { const Wrapper = struct { const Args = @TypeOf(args); - fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void { + fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void { loop.beginOneEvent(); loop.yield(); @call(.{}, func, func_args); // compile error when called with non-void ret type diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig index fd42842a3a..c19330d5a9 100644 --- a/lib/std/event/rwlock.zig +++ b/lib/std/event/rwlock.zig @@ -226,7 +226,7 @@ test "std.event.RwLock" { const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len; try testing.expectEqualSlices(i32, expected_result, shared_test_data); } -fn testLock(allocator: *Allocator, lock: *RwLock) callconv(.Async) void { +fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void { var read_nodes: [100]Loop.NextTickNode = undefined; for (read_nodes) |*read_node| { const frame = allocator.create(@Frame(readRunner)) catch @panic("memory"); diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index d5b6285c00..b7c8f761d3 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -33,7 +33,7 @@ pub fn LinearFifo( }; return struct { - allocator: if (buffer_type == .Dynamic) *Allocator else void, + allocator: if (buffer_type == .Dynamic) Allocator else void, buf: if (buffer_type == .Static) [buffer_type.Static]T else []T, head: usize, count: usize, @@ -69,7 +69,7 @@ pub fn LinearFifo( } }, .Dynamic => struct { - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return .{ .allocator = allocator, .buf = &[_]T{}, diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 24f5daa095..97dfcc78ba 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -1803,7 +1803,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 { pub const AllocPrintError = error{OutOfMemory}; -pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 { +pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 { const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) { // Output too long. Can't possibly allocate enough memory to display it. error.Overflow => return error.OutOfMemory, @@ -1816,7 +1816,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: any pub const allocPrint0 = @compileError("deprecated; use allocPrintZ"); -pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 { +pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 { const result = try allocPrint(allocator, fmt ++ "\x00", args); return result[0 .. 
result.len - 1 :0]; } diff --git a/lib/std/fs.zig b/lib/std/fs.zig index c10ded3bdc..4d900d2e67 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -64,7 +64,7 @@ pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) { }; /// TODO remove the allocator requirement from this API -pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void { +pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void { if (cwd().symLink(existing_path, new_path, .{})) { return; } else |err| switch (err) { @@ -875,7 +875,7 @@ pub const Dir = struct { /// Must call `Walker.deinit` when done. /// The order of returned file system entries is undefined. /// `self` will not be closed after walking it. - pub fn walk(self: Dir, allocator: *Allocator) !Walker { + pub fn walk(self: Dir, allocator: Allocator) !Walker { var name_buffer = std.ArrayList(u8).init(allocator); errdefer name_buffer.deinit(); @@ -1393,7 +1393,7 @@ pub const Dir = struct { /// Same as `Dir.realpath` except caller must free the returned memory. /// See also `Dir.realpath`. - pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 { + pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) ![]u8 { // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 @@ -1804,7 +1804,7 @@ pub const Dir = struct { /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. - pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 { + pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 { return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null); } @@ -1815,7 +1815,7 @@ pub const Dir = struct { /// Allows specifying alignment and a sentinel value. pub fn readFileAllocOptions( self: Dir, - allocator: *mem.Allocator, + allocator: mem.Allocator, file_path: []const u8, max_bytes: usize, size_hint: ?usize, @@ -2464,7 +2464,7 @@ pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathEr /// `selfExePath` except allocates the result on the heap. /// Caller owns returned memory. -pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 { +pub fn selfExePathAlloc(allocator: Allocator) ![]u8 { // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux // system, readlink will completely fail to return a result larger than // PATH_MAX even if given a sufficiently large buffer. This makes it @@ -2573,7 +2573,7 @@ pub fn selfExePathW() [:0]const u16 { /// `selfExeDirPath` except allocates the result on the heap. /// Caller owns returned memory. -pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 { +pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 { // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux // system, readlink will completely fail to return a result larger than // PATH_MAX even if given a sufficiently large buffer. This makes it @@ -2596,7 +2596,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 { /// `realpath`, except caller must free the returned memory. /// See also `Dir.realpath`. 
-pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 { +pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 { // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig index 268de8f3c8..6fa46579fd 100644 --- a/lib/std/fs/file.zig +++ b/lib/std/fs/file.zig @@ -420,7 +420,7 @@ pub const File = struct { /// Reads all the bytes from the current position to the end of the file. /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. - pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 { + pub fn readToEndAlloc(self: File, allocator: mem.Allocator, max_bytes: usize) ![]u8 { return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null); } @@ -432,7 +432,7 @@ pub const File = struct { /// Allows specifying alignment and a sentinel value. pub fn readToEndAllocOptions( self: File, - allocator: *mem.Allocator, + allocator: mem.Allocator, max_bytes: usize, size_hint: ?usize, comptime alignment: u29, diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig index 2501a5194b..e2a9c5438f 100644 --- a/lib/std/fs/get_app_data_dir.zig +++ b/lib/std/fs/get_app_data_dir.zig @@ -12,7 +12,7 @@ pub const GetAppDataDirError = error{ /// Caller owns returned memory. /// TODO determine if we can remove the allocator requirement -pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 { +pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 { switch (builtin.os.tag) { .windows => { var dir_path_ptr: [*:0]u16 = undefined; diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index 6372757d37..323f974255 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -35,7 +35,7 @@ pub fn isSep(byte: u8) bool { /// This is different from mem.join in that the separator will not be repeated if /// it is found at the end or beginning of a pair of consecutive paths. -fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 { +fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 { if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{}; // Find first non-empty path index. @@ -99,13 +99,13 @@ fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) boo /// Naively combines a series of paths with the native path seperator. /// Allocates memory for the result, which must be freed by the caller. -pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 { +pub fn join(allocator: Allocator, paths: []const []const u8) ![]u8 { return joinSepMaybeZ(allocator, sep, isSep, paths, false); } /// Naively combines a series of paths with the native path seperator and null terminator. /// Allocates memory for the result, which must be freed by the caller. -pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 { +pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 { const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true); return out[0 .. 
out.len - 1 :0]; } @@ -445,7 +445,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool { } /// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`. -pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 { +pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 { if (native_os == .windows) { return resolveWindows(allocator, paths); } else { @@ -461,7 +461,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 { /// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters. /// Note: all usage of this function should be audited due to the existence of symlinks. /// Without performing actual syscalls, resolving `..` could be incorrect. -pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 { +pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { if (paths.len == 0) { assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd return process.getCwdAlloc(allocator); @@ -647,7 +647,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 { /// If all paths are relative it uses the current working directory as a starting point. /// Note: all usage of this function should be audited due to the existence of symlinks. /// Without performing actual syscalls, resolving `..` could be incorrect. -pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 { +pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 { if (paths.len == 0) { assert(native_os != .windows); // resolvePosix called on windows can't use getCwd return process.getCwdAlloc(allocator); @@ -1058,7 +1058,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) !void { /// resolve to the same path (after calling `resolve` on each), a zero-length /// string is returned. /// On Windows this canonicalizes the drive to a capital letter and paths to `\\`. -pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 { if (native_os == .windows) { return relativeWindows(allocator, from, to); } else { @@ -1066,7 +1066,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { } } -pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 { const resolved_from = try resolveWindows(allocator, &[_][]const u8{from}); defer allocator.free(resolved_from); @@ -1139,7 +1139,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) return [_]u8{}; } -pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 { const resolved_from = try resolvePosix(allocator, &[_][]const u8{from}); defer allocator.free(resolved_from); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index f2b584d6d4..1ab6608327 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -52,9 +52,11 @@ test "accessAbsolute" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); + const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] 
}); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; try fs.accessAbsolute(base_path, .{}); @@ -69,9 +71,11 @@ test "openDirAbsolute" { try tmp.dir.makeDir("subdir"); var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); + const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; { @@ -80,8 +84,8 @@ test "openDirAbsolute" { } for ([_][]const u8{ ".", ".." }) |sub_path| { - const dir_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, sub_path }); - defer arena.allocator.free(dir_path); + const dir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, sub_path }); + defer allocator.free(dir_path); var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(); } @@ -107,12 +111,12 @@ test "readLinkAbsolute" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; - const allocator = &arena.allocator; { const target_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "file.txt" }); @@ -158,15 +162,16 @@ test "Dir.Iterator" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); - var entries = std.ArrayList(Dir.Entry).init(&arena.allocator); + var entries = std.ArrayList(Dir.Entry).init(allocator); // Create iterator. var iter = tmp_dir.dir.iterate(); while (try iter.next()) |entry| { // We cannot just store `entry` as on Windows, we're re-using the name buffer // which means we'll actually share the `name` pointer between entries! - const name = try arena.allocator.dupe(u8, entry.name); + const name = try allocator.dupe(u8, entry.name); try entries.append(Dir.Entry{ .name = name, .kind = entry.kind }); } @@ -202,25 +207,26 @@ test "Dir.realpath smoke test" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] 
}); + break :blk try fs.realpathAlloc(allocator, relative_path); }; // First, test non-alloc version { var buf1: [fs.MAX_PATH_BYTES]u8 = undefined; const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]); - const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" }); + const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" }); try testing.expect(mem.eql(u8, file_path, expected_path)); } // Next, test alloc version { - const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file"); - const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" }); + const file_path = try tmp_dir.dir.realpathAlloc(allocator, "test_file"); + const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" }); try testing.expect(mem.eql(u8, file_path, expected_path)); } @@ -476,11 +482,11 @@ test "renameAbsolute" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.allocator(); const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; try testing.expectError(error.FileNotFound, fs.renameAbsolute( @@ -987,11 +993,11 @@ test ". and .. in absolute functions" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.allocator(); const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; const subdir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "./subdir" }); diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 528ccfc0f1..1a033653d3 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -80,7 +80,7 @@ pub const PreopenList = struct { pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError; /// Deinitialize with `deinit`. 
- pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return Self{ .buffer = InnerList.init(allocator) }; } diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig index 56544162c8..c103925bdd 100644 --- a/lib/std/fs/watch.zig +++ b/lib/std/fs/watch.zig @@ -30,7 +30,7 @@ pub fn Watch(comptime V: type) type { return struct { channel: event.Channel(Event.Error!Event), os_data: OsData, - allocator: *Allocator, + allocator: Allocator, const OsData = switch (builtin.os.tag) { // TODO https://github.com/ziglang/zig/issues/3778 @@ -96,7 +96,7 @@ pub fn Watch(comptime V: type) type { pub const Error = WatchEventError; }; - pub fn init(allocator: *Allocator, event_buf_count: usize) !*Self { + pub fn init(allocator: Allocator, event_buf_count: usize) !*Self { const self = try allocator.create(Self); errdefer allocator.destroy(self); @@ -648,7 +648,7 @@ test "write a file, watch it, write it again, delete it" { return testWriteWatchWriteDelete(std.testing.allocator); } -fn testWriteWatchWriteDelete(allocator: *Allocator) !void { +fn testWriteWatchWriteDelete(allocator: Allocator) !void { const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" }); defer allocator.free(file_path); diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index 5663bed249..22fd6526f4 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -309,7 +309,7 @@ test "hash struct deep" { const Self = @This(); - pub fn init(allocator: *mem.Allocator, a_: u32, b_: u16, c_: bool) !Self { + pub fn init(allocator: mem.Allocator, a_: u32, b_: u16, c_: bool) !Self { const ptr = try allocator.create(bool); ptr.* = c_; return Self{ .a = a_, .b = b_, .c = ptr }; diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index c145fcbae2..f6f1da1894 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -165,7 +165,7 @@ pub fn main() !void { var buffer: [1024]u8 = undefined; var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]); - const args = try std.process.argsAlloc(&fixed.allocator); + const args = try std.process.argsAlloc(fixed.allocator()); var filter: ?[]u8 = ""; var count: usize = mode(128 * MiB); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index cd23ccd39e..5356bbff1a 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -363,7 +363,7 @@ pub fn HashMap( comptime verifyContext(Context, K, K, u64); return struct { unmanaged: Unmanaged, - allocator: *Allocator, + allocator: Allocator, ctx: Context, /// The type of the unmanaged hash map underlying this wrapper @@ -390,7 +390,7 @@ pub fn HashMap( /// Create a managed hash map with an empty context. /// If the context is not zero-sized, you must use /// initContext(allocator, ctx) instead. - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { if (@sizeOf(Context) != 0) { @compileError("Context must be specified! 
Call initContext(allocator, ctx) instead."); } @@ -402,7 +402,7 @@ pub fn HashMap( } /// Create a managed hash map with a context - pub fn initContext(allocator: *Allocator, ctx: Context) Self { + pub fn initContext(allocator: Allocator, ctx: Context) Self { return .{ .unmanaged = .{}, .allocator = allocator, @@ -636,7 +636,7 @@ pub fn HashMap( } /// Creates a copy of this map, using a specified allocator - pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self { + pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) !Self { var other = try self.unmanaged.cloneContext(new_allocator, self.ctx); return other.promoteContext(new_allocator, self.ctx); } @@ -650,7 +650,7 @@ pub fn HashMap( /// Creates a copy of this map, using a specified allocator and context. pub fn cloneWithAllocatorAndContext( self: Self, - new_allocator: *Allocator, + new_allocator: Allocator, new_ctx: anytype, ) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) { var other = try self.unmanaged.cloneContext(new_allocator, new_ctx); @@ -841,13 +841,13 @@ pub fn HashMapUnmanaged( pub const Managed = HashMap(K, V, Context, max_load_percentage); - pub fn promote(self: Self, allocator: *Allocator) Managed { + pub fn promote(self: Self, allocator: Allocator) Managed { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead."); return promoteContext(self, allocator, undefined); } - pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed { + pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed { return .{ .unmanaged = self, .allocator = allocator, @@ -859,7 +859,7 @@ pub fn HashMapUnmanaged( return size * 100 < max_load_percentage * cap; } - pub fn deinit(self: *Self, allocator: *Allocator) void { + pub fn deinit(self: *Self, allocator: Allocator) void { self.deallocate(allocator); self.* = undefined; } @@ -872,20 +872,20 @@ pub fn HashMapUnmanaged( pub const ensureCapacity = @compileError("deprecated; call `ensureUnusedCapacity` or `ensureTotalCapacity`"); - pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void { + pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return ensureTotalCapacityContext(self, allocator, new_size, undefined); } - pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void { + pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) !void { if (new_size > self.size) try self.growIfNeeded(allocator, new_size - self.size, ctx); } - pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void { + pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) !void { return ensureUnusedCapacityContext(self, allocator, additional_size, undefined); } - pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void { + pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) !void { return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx); } @@ -897,7 +897,7 @@ pub fn HashMapUnmanaged( } } - pub fn clearAndFree(self: *Self, allocator: *Allocator) void { + pub fn clearAndFree(self: *Self, allocator: 
Allocator) void { self.deallocate(allocator); self.size = 0; self.available = 0; @@ -962,12 +962,12 @@ pub fn HashMapUnmanaged( } /// Insert an entry in the map. Assumes it is not already present. - pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { + pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } - pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { + pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void { assert(!self.containsContext(key, ctx)); try self.growIfNeeded(allocator, 1, ctx); @@ -1021,12 +1021,12 @@ pub fn HashMapUnmanaged( } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. - pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { + pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } - pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { + pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV { const gop = try self.getOrPutContext(allocator, key, ctx); var result: ?KV = null; if (gop.found_existing) { @@ -1157,12 +1157,12 @@ pub fn HashMapUnmanaged( } /// Insert an entry if the associated key is not already present, otherwise update preexisting value. 
- pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { + pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } - pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { + pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); result.value_ptr.* = value; } @@ -1231,24 +1231,24 @@ pub fn HashMapUnmanaged( return null; } - pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { + pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead."); return self.getOrPutContext(allocator, key, undefined); } - pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult { + pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult { const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!gop.found_existing) { gop.key_ptr.* = key; } return gop; } - pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { + pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead."); return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined); } - pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { + pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { self.growIfNeeded(allocator, 1, ctx) catch |err| { // If allocation fails, try to do the lookup anyway. // If we find an existing item, we can return it. 
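An illustrative sketch (not taken from the patch) of the unmanaged-map convention the signatures above follow, where every allocating call now receives an `Allocator` by value instead of a `*Allocator`:

    const std = @import("std");

    test "unmanaged map takes Allocator by value" {
        var map = std.AutoHashMapUnmanaged(u32, u32){};
        defer map.deinit(std.testing.allocator);

        try map.put(std.testing.allocator, 1, 10);
        try map.put(std.testing.allocator, 2, 20);
        try std.testing.expect(map.get(2).? == 20);
    }
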
@@ -1341,12 +1341,12 @@ pub fn HashMapUnmanaged( }; } - pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry { + pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !Entry { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } - pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry { + pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !Entry { const res = try self.getOrPutAdapted(allocator, key, ctx); if (!res.found_existing) { res.key_ptr.* = key; @@ -1403,18 +1403,18 @@ pub fn HashMapUnmanaged( return @truncate(Size, max_load - self.available); } - fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void { + fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) !void { if (new_count > self.available) { try self.grow(allocator, capacityForSize(self.load() + new_count), ctx); } } - pub fn clone(self: Self, allocator: *Allocator) !Self { + pub fn clone(self: Self, allocator: Allocator) !Self { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, @as(Context, undefined)); } - pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) { + pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) { var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){}; if (self.size == 0) return other; @@ -1439,7 +1439,7 @@ pub fn HashMapUnmanaged( return other; } - fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void { + fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) !void { @setCold(true); const new_cap = std.math.max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); @@ -1470,7 +1470,7 @@ pub fn HashMapUnmanaged( std.mem.swap(Self, self, &map); } - fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void { + fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) !void { const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); @@ -1503,7 +1503,7 @@ pub fn HashMapUnmanaged( self.metadata = @intToPtr([*]Metadata, metadata); } - fn deallocate(self: *Self, allocator: *Allocator) void { + fn deallocate(self: *Self, allocator: Allocator) void { if (self.metadata == null) return; const header_align = @alignOf(Header); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index fcea90d751..4ea0ff718f 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -97,13 +97,12 @@ const CAllocator = struct { } fn alloc( - allocator: *Allocator, + _: *c_void, len: usize, alignment: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { - _ = allocator; _ = return_address; assert(len > 0); assert(std.math.isPowerOfTwo(alignment)); @@ -124,20 +123,15 @@ const CAllocator = struct { } fn resize( - allocator: *Allocator, + _: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { - _ = allocator; + ) ?usize { _ = buf_align; _ = 
return_address; - if (new_len == 0) { - alignedFree(buf.ptr); - return 0; - } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } @@ -147,17 +141,32 @@ const CAllocator = struct { return mem.alignAllocLen(full_len, new_len, len_align); } } - return error.OutOfMemory; + return null; + } + + fn free( + _: *c_void, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + alignedFree(buf.ptr); } }; /// Supports the full Allocator interface, including alignment, and exploiting /// `malloc_usable_size` if available. For an allocator that directly calls /// `malloc`/`free`, see `raw_c_allocator`. -pub const c_allocator = &c_allocator_state; -var c_allocator_state = Allocator{ - .allocFn = CAllocator.alloc, - .resizeFn = CAllocator.resize, +pub const c_allocator = Allocator{ + .ptr = undefined, + .vtable = &c_allocator_vtable, +}; +const c_allocator_vtable = Allocator.VTable{ + .alloc = CAllocator.alloc, + .resize = CAllocator.resize, + .free = CAllocator.free, }; /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls @@ -165,20 +174,23 @@ var c_allocator_state = Allocator{ /// This allocator is safe to use as the backing allocator with /// `ArenaAllocator` for example and is more optimal in such a case /// than `c_allocator`. -pub const raw_c_allocator = &raw_c_allocator_state; -var raw_c_allocator_state = Allocator{ - .allocFn = rawCAlloc, - .resizeFn = rawCResize, +pub const raw_c_allocator = Allocator{ + .ptr = undefined, + .vtable = &raw_c_allocator_vtable, +}; +const raw_c_allocator_vtable = Allocator.VTable{ + .alloc = rawCAlloc, + .resize = rawCResize, + .free = rawCFree, }; fn rawCAlloc( - self: *Allocator, + _: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize, ) Allocator.Error![]u8 { - _ = self; _ = len_align; _ = ret_addr; assert(ptr_align <= @alignOf(std.c.max_align_t)); @@ -187,43 +199,46 @@ fn rawCAlloc( } fn rawCResize( - self: *Allocator, + _: *c_void, buf: []u8, old_align: u29, new_len: usize, len_align: u29, ret_addr: usize, -) Allocator.Error!usize { - _ = self; +) ?usize { _ = old_align; _ = ret_addr; - if (new_len == 0) { - c.free(buf.ptr); - return 0; - } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } - return error.OutOfMemory; + return null; +} + +fn rawCFree( + _: *c_void, + buf: []u8, + old_align: u29, + ret_addr: usize, +) void { + _ = old_align; + _ = ret_addr; + c.free(buf.ptr); } /// This allocator makes a syscall directly for every allocation and free. /// Thread-safe and lock-free. 
pub const page_allocator = if (builtin.target.isWasm()) - &wasm_page_allocator_state + Allocator{ + .ptr = undefined, + .vtable = &WasmPageAllocator.vtable, + } else if (builtin.target.os.tag == .freestanding) root.os.heap.page_allocator else - &page_allocator_state; - -var page_allocator_state = Allocator{ - .allocFn = PageAllocator.alloc, - .resizeFn = PageAllocator.resize, -}; -var wasm_page_allocator_state = Allocator{ - .allocFn = WasmPageAllocator.alloc, - .resizeFn = WasmPageAllocator.resize, -}; + Allocator{ + .ptr = undefined, + .vtable = &PageAllocator.vtable, + }; /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize { @@ -236,8 +251,13 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize { pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; const PageAllocator = struct { - fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { - _ = allocator; + const vtable = Allocator.VTable{ + .alloc = alloc, + .resize = resize, + .free = free, + }; + + fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { _ = ra; assert(n > 0); const aligned_len = mem.alignForward(n, mem.page_size); @@ -335,30 +355,19 @@ const PageAllocator = struct { } fn resize( - allocator: *Allocator, + _: *c_void, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { - _ = allocator; + ) ?usize { _ = buf_align; _ = return_address; const new_size_aligned = mem.alignForward(new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; - if (new_size == 0) { - // From the docs: - // "If the dwFreeType parameter is MEM_RELEASE, this parameter - // must be 0 (zero). The function frees the entire region that - // is reserved in the initial allocation call to VirtualAlloc." - // So we can only use MEM_RELEASE when actually releasing the - // whole allocation. - w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE); - return 0; - } if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; @@ -378,7 +387,7 @@ const PageAllocator = struct { if (new_size_aligned <= old_size_aligned) { return alignPageAllocLen(new_size_aligned, new_size, len_align); } - return error.OutOfMemory; + return null; } const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); @@ -389,14 +398,25 @@ const PageAllocator = struct { const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned); // TODO: if the next_mmap_addr_hint is within the unmapped range, update it os.munmap(ptr[0 .. 
buf_aligned_len - new_size_aligned]); - if (new_size_aligned == 0) - return 0; return alignPageAllocLen(new_size_aligned, new_size, len_align); } // TODO: call mremap // TODO: if the next_mmap_addr_hint is within the remapped range, update it - return error.OutOfMemory; + return null; + } + + fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void { + _ = buf_align; + _ = return_address; + + if (builtin.os.tag == .windows) { + os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE); + } else { + const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const ptr = @alignCast(mem.page_size, buf_unaligned.ptr); + os.munmap(ptr[0..buf_aligned_len]); + } } }; @@ -407,6 +427,12 @@ const WasmPageAllocator = struct { } } + const vtable = Allocator.VTable{ + .alloc = alloc, + .resize = resize, + .free = free, + }; + const PageStatus = enum(u1) { used = 0, free = 1, @@ -492,8 +518,7 @@ const WasmPageAllocator = struct { return mem.alignForward(memsize, mem.page_size) / mem.page_size; } - fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { - _ = allocator; + fn alloc(_: *c_void, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { _ = ra; const page_count = nPages(len); const page_idx = try allocPages(page_count, alignment); @@ -548,45 +573,57 @@ const WasmPageAllocator = struct { } fn resize( - allocator: *Allocator, + _: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { - _ = allocator; + ) ?usize { _ = buf_align; _ = return_address; const aligned_len = mem.alignForward(buf.len, mem.page_size); - if (new_len > aligned_len) return error.OutOfMemory; + if (new_len > aligned_len) return null; const current_n = nPages(aligned_len); const new_n = nPages(new_len); if (new_n != current_n) { const base = nPages(@ptrToInt(buf.ptr)); freePages(base + new_n, base + current_n); } - return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align); + return alignPageAllocLen(new_n * mem.page_size, new_len, len_align); + } + + fn free( + _: *c_void, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + const aligned_len = mem.alignForward(buf.len, mem.page_size); + const current_n = nPages(aligned_len); + const base = nPages(@ptrToInt(buf.ptr)); + freePages(base, base + current_n); } }; pub const HeapAllocator = switch (builtin.os.tag) { .windows => struct { - allocator: Allocator, heap_handle: ?HeapHandle, const HeapHandle = os.windows.HANDLE; pub fn init() HeapAllocator { return HeapAllocator{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, .heap_handle = null, }; } + pub fn allocator(self: *HeapAllocator) Allocator { + return Allocator.init(self, alloc, resize, free); + } + pub fn deinit(self: *HeapAllocator) void { if (self.heap_handle) |heap_handle| { os.windows.HeapDestroy(heap_handle); @@ -598,14 +635,13 @@ pub const HeapAllocator = switch (builtin.os.tag) { } fn alloc( - allocator: *Allocator, + self: *HeapAllocator, n: usize, ptr_align: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { _ = return_address; - const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); const amt = n + ptr_align - 1 + @sizeOf(usize); const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst); @@ -632,20 +668,15 @@ pub const HeapAllocator = switch 
(builtin.os.tag) { } fn resize( - allocator: *Allocator, + self: *HeapAllocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { + ) ?usize { _ = buf_align; _ = return_address; - const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); - if (new_size == 0) { - os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); - return 0; - } const root_addr = getRecordPtr(buf).*; const align_offset = @ptrToInt(buf.ptr) - root_addr; @@ -655,7 +686,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { os.windows.HEAP_REALLOC_IN_PLACE_ONLY, @intToPtr(*c_void, root_addr), amt, - ) orelse return error.OutOfMemory; + ) orelse return null; assert(new_ptr == @intToPtr(*c_void, root_addr)); const return_len = init: { if (len_align == 0) break :init new_size; @@ -667,6 +698,17 @@ pub const HeapAllocator = switch (builtin.os.tag) { getRecordPtr(buf.ptr[0..return_len]).* = root_addr; return return_len; } + + fn free( + self: *HeapAllocator, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); + } }, else => @compileError("Unsupported OS"), }; @@ -682,21 +724,32 @@ fn sliceContainsSlice(container: []u8, slice: []u8) bool { } pub const FixedBufferAllocator = struct { - allocator: Allocator, end_index: usize, buffer: []u8, pub fn init(buffer: []u8) FixedBufferAllocator { return FixedBufferAllocator{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, .buffer = buffer, .end_index = 0, }; } + /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe + pub fn allocator(self: *FixedBufferAllocator) Allocator { + return Allocator.init(self, alloc, resize, free); + } + + /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` + /// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe + pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator { + return Allocator.init( + self, + threadSafeAlloc, + Allocator.NoResize(FixedBufferAllocator).noResize, + Allocator.NoOpFree(FixedBufferAllocator).noOpFree, + ); + } + pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { return sliceContainsPtr(self.buffer, ptr); } @@ -707,15 +760,14 @@ pub const FixedBufferAllocator = struct { /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index /// then we won't be able to determine what the last allocation was. This is because - /// the alignForward operation done in alloc is not reverisible. + /// the alignForward operation done in alloc is not reversible. 
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { return buf.ptr + buf.len == self.buffer.ptr + self.end_index; } - fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { + fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { _ = len_align; _ = ra; - const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return error.OutOfMemory; const adjusted_index = self.end_index + adjust_off; @@ -730,97 +782,78 @@ pub const FixedBufferAllocator = struct { } fn resize( - allocator: *Allocator, + self: *FixedBufferAllocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { + ) ?usize { _ = buf_align; _ = return_address; - const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); assert(self.ownsSlice(buf)); // sanity check if (!self.isLastAllocation(buf)) { - if (new_size > buf.len) - return error.OutOfMemory; - return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align); + if (new_size > buf.len) return null; + return mem.alignAllocLen(buf.len, new_size, len_align); } if (new_size <= buf.len) { const sub = buf.len - new_size; self.end_index -= sub; - return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align); + return mem.alignAllocLen(buf.len - sub, new_size, len_align); } const add = new_size - buf.len; - if (add + self.end_index > self.buffer.len) { - return error.OutOfMemory; - } + if (add + self.end_index > self.buffer.len) return null; + self.end_index += add; return new_size; } - pub fn reset(self: *FixedBufferAllocator) void { - self.end_index = 0; - } -}; + fn free( + self: *FixedBufferAllocator, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + assert(self.ownsSlice(buf)); // sanity check -pub const ThreadSafeFixedBufferAllocator = blk: { - if (builtin.single_threaded) { - break :blk FixedBufferAllocator; - } else { - // lock free - break :blk struct { - allocator: Allocator, - end_index: usize, - buffer: []u8, - - pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator { - return ThreadSafeFixedBufferAllocator{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = Allocator.noResize, - }, - .buffer = buffer, - .end_index = 0, - }; - } + if (self.isLastAllocation(buf)) { + self.end_index -= buf.len; + } + } - fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { - _ = len_align; - _ = ra; - const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator); - var end_index = @atomicLoad(usize, &self.end_index, .SeqCst); - while (true) { - const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse - return error.OutOfMemory; - const adjusted_index = end_index + adjust_off; - const new_end_index = adjusted_index + n; - if (new_end_index > self.buffer.len) { - return error.OutOfMemory; - } - end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index]; - } + fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { + _ = len_align; + _ = ra; + var end_index = @atomicLoad(usize, &self.end_index, .SeqCst); + while (true) { + const adjust_off = 
mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse + return error.OutOfMemory; + const adjusted_index = end_index + adjust_off; + const new_end_index = adjusted_index + n; + if (new_end_index > self.buffer.len) { + return error.OutOfMemory; } + end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index]; + } + } - pub fn reset(self: *ThreadSafeFixedBufferAllocator) void { - self.end_index = 0; - } - }; + pub fn reset(self: *FixedBufferAllocator) void { + self.end_index = 0; } }; -pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) { +pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator"); + +pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { return StackFallbackAllocator(size){ .buffer = undefined, .fallback_allocator = fallback_allocator, .fixed_buffer_allocator = undefined, - .allocator = Allocator{ - .allocFn = StackFallbackAllocator(size).alloc, - .resizeFn = StackFallbackAllocator(size).resize, - }, }; } @@ -829,40 +862,51 @@ pub fn StackFallbackAllocator(comptime size: usize) type { const Self = @This(); buffer: [size]u8, - allocator: Allocator, - fallback_allocator: *Allocator, + fallback_allocator: Allocator, fixed_buffer_allocator: FixedBufferAllocator, - pub fn get(self: *Self) *Allocator { + /// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator + pub fn get(self: *Self) Allocator { self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); - return &self.allocator; + return Allocator.init(self, alloc, resize, free); } fn alloc( - allocator: *Allocator, + self: *Self, len: usize, ptr_align: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { - const self = @fieldParentPtr(Self, "allocator", allocator); - return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch - return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address); + return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch + return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address); } fn resize( - allocator: *Allocator, + self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { - const self = @fieldParentPtr(Self, "allocator", allocator); + ) ?usize { if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address); + return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address); } else { - return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address); + return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address); + } + } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address); + } else { + return 
self.fallback_allocator.rawFree(buf, buf_align, return_address); } } }; @@ -950,8 +994,8 @@ test "HeapAllocator" { if (builtin.os.tag == .windows) { var heap_allocator = HeapAllocator.init(); defer heap_allocator.deinit(); + const allocator = heap_allocator.allocator(); - const allocator = &heap_allocator.allocator; try testAllocator(allocator); try testAllocatorAligned(allocator); try testAllocatorLargeAlignment(allocator); @@ -962,36 +1006,39 @@ test "HeapAllocator" { test "ArenaAllocator" { var arena_allocator = ArenaAllocator.init(page_allocator); defer arena_allocator.deinit(); + const allocator = arena_allocator.allocator(); - try testAllocator(&arena_allocator.allocator); - try testAllocatorAligned(&arena_allocator.allocator); - try testAllocatorLargeAlignment(&arena_allocator.allocator); - try testAllocatorAlignedShrink(&arena_allocator.allocator); + try testAllocator(allocator); + try testAllocatorAligned(allocator); + try testAllocatorLargeAlignment(allocator); + try testAllocatorAlignedShrink(allocator); } var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; test "FixedBufferAllocator" { var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); + const allocator = fixed_buffer_allocator.allocator(); - try testAllocator(&fixed_buffer_allocator.allocator); - try testAllocatorAligned(&fixed_buffer_allocator.allocator); - try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator); - try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator); + try testAllocator(allocator); + try testAllocatorAligned(allocator); + try testAllocatorLargeAlignment(allocator); + try testAllocatorAlignedShrink(allocator); } test "FixedBufferAllocator.reset" { var buf: [8]u8 align(@alignOf(u64)) = undefined; var fba = FixedBufferAllocator.init(buf[0..]); + const allocator = fba.allocator(); const X = 0xeeeeeeeeeeeeeeee; const Y = 0xffffffffffffffff; - var x = try fba.allocator.create(u64); + var x = try allocator.create(u64); x.* = X; - try testing.expectError(error.OutOfMemory, fba.allocator.create(u64)); + try testing.expectError(error.OutOfMemory, allocator.create(u64)); fba.reset(); - var y = try fba.allocator.create(u64); + var y = try allocator.create(u64); y.* = Y; // we expect Y to have overwritten X. 
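A short usage sketch (illustrative, not part of the patch) for the two `FixedBufferAllocator` interfaces shown above; per the doc comments, the two interfaces must not be used at the same time, but sequential use of both is fine:

    const std = @import("std");

    test "FixedBufferAllocator interfaces" {
        var buf: [64]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buf[0..]);

        const allocator = fba.allocator(); // single-threaded interface
        const a = try allocator.alloc(u8, 32);
        allocator.free(a);
        fba.reset();

        // Lock-free, thread-safe interface over the same buffer
        // (do not use both interfaces concurrently).
        const ts = fba.threadSafeAllocator();
        _ = try ts.alloc(u8, 8);
    }
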
@@ -1014,23 +1061,25 @@ test "FixedBufferAllocator Reuse memory on realloc" { // check if we re-use the memory { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); + const allocator = fixed_buffer_allocator.allocator(); - var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5); + var slice0 = try allocator.alloc(u8, 5); try testing.expect(slice0.len == 5); - var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10); + var slice1 = try allocator.realloc(slice0, 10); try testing.expect(slice1.ptr == slice0.ptr); try testing.expect(slice1.len == 10); - try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11)); + try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11)); } // check that we don't re-use the memory if it's not the most recent block { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); + const allocator = fixed_buffer_allocator.allocator(); - var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2); + var slice0 = try allocator.alloc(u8, 2); slice0[0] = 1; slice0[1] = 2; - var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2); - var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4); + var slice1 = try allocator.alloc(u8, 2); + var slice2 = try allocator.realloc(slice0, 4); try testing.expect(slice0.ptr != slice2.ptr); try testing.expect(slice1.ptr != slice2.ptr); try testing.expect(slice2[0] == 1); @@ -1038,19 +1087,19 @@ test "FixedBufferAllocator Reuse memory on realloc" { } } -test "ThreadSafeFixedBufferAllocator" { - var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); +test "Thread safe FixedBufferAllocator" { + var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); - try testAllocator(&fixed_buffer_allocator.allocator); - try testAllocatorAligned(&fixed_buffer_allocator.allocator); - try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator); - try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator); + try testAllocator(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator()); } /// This one should not try alignments that exceed what C malloc can handle. 
-pub fn testAllocator(base_allocator: *mem.Allocator) !void { +pub fn testAllocator(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = &validationAllocator.allocator; + const allocator = validationAllocator.allocator(); var slice = try allocator.alloc(*i32, 100); try testing.expect(slice.len == 100); @@ -1094,9 +1143,9 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void { allocator.free(oversize); } -pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void { +pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = &validationAllocator.allocator; + const allocator = validationAllocator.allocator(); // Test a few alignment values, smaller and bigger than the type's one inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| { @@ -1124,9 +1173,9 @@ pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void { } } -pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void { +pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = &validationAllocator.allocator; + const allocator = validationAllocator.allocator(); //Maybe a platform's page_size is actually the same as or // very near usize? @@ -1156,12 +1205,12 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void { allocator.free(slice); } -pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void { +pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = &validationAllocator.allocator; + const allocator = validationAllocator.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator; + const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator(); const alloc_size = mem.page_size * 2 + 50; var slice = try allocator.alignedAlloc(u8, 16, alloc_size); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index d61f66ce4a..4bc5d58c1a 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -6,9 +6,7 @@ const Allocator = std.mem.Allocator; /// This allocator takes an existing allocator, wraps it, and provides an interface /// where you can allocate without freeing, and then free it all together. pub const ArenaAllocator = struct { - allocator: Allocator, - - child_allocator: *Allocator, + child_allocator: Allocator, state: State, /// Inner state of ArenaAllocator. 
Can be stored rather than the entire ArenaAllocator @@ -17,21 +15,21 @@ pub const ArenaAllocator = struct { buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}), end_index: usize = 0, - pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator { + pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator { return .{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, .child_allocator = child_allocator, .state = self, }; } }; + pub fn allocator(self: *ArenaAllocator) Allocator { + return Allocator.init(self, alloc, resize, free); + } + const BufNode = std.SinglyLinkedList([]u8).Node; - pub fn init(child_allocator: *Allocator) ArenaAllocator { + pub fn init(child_allocator: Allocator) ArenaAllocator { return (State{}).promote(child_allocator); } @@ -49,7 +47,7 @@ pub const ArenaAllocator = struct { const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); const big_enough_len = prev_len + actual_min_size; const len = big_enough_len + big_enough_len / 2; - const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress()); + const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress()); const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr)); buf_node.* = BufNode{ .data = buf, @@ -60,10 +58,9 @@ pub const ArenaAllocator = struct { return buf_node; } - fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { + fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { _ = len_align; _ = ra; - const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator); var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align); while (true) { @@ -81,27 +78,23 @@ pub const ArenaAllocator = struct { const bigger_buf_size = @sizeOf(BufNode) + new_end_index; // Try to grow the buffer in-place - cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) catch |err| switch (err) { - error.OutOfMemory => { - // Allocate a new node if that's not possible - cur_node = try self.createNode(cur_buf.len, n + ptr_align); - continue; - }, + cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse { + // Allocate a new node if that's not possible + cur_node = try self.createNode(cur_buf.len, n + ptr_align); + continue; }; } } - fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize { + fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { _ = buf_align; _ = len_align; _ = ret_addr; - const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator); - const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory; + const cur_node = self.state.buffer_list.first orelse return null; const cur_buf = cur_node.data[@sizeOf(BufNode)..]; if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) { - if (new_len > buf.len) - return error.OutOfMemory; + if (new_len > buf.len) return null; return new_len; } @@ -112,7 +105,19 @@ pub const ArenaAllocator = struct { self.state.end_index += new_len - buf.len; return new_len; } else { - return error.OutOfMemory; + return null; + } + } + + fn free(self: *ArenaAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void { + _ = buf_align; + _ = ret_addr; + + const 
cur_node = self.state.buffer_list.first orelse return; + const cur_buf = cur_node.data[@sizeOf(BufNode)..]; + + if (@ptrToInt(cur_buf.ptr) + self.state.end_index == @ptrToInt(buf.ptr) + buf.len) { + self.state.end_index -= buf.len; } } }; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index c3c28a53b6..8160bc2a66 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -172,11 +172,7 @@ pub const Config = struct { pub fn GeneralPurposeAllocator(comptime config: Config) type { return struct { - allocator: Allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, - backing_allocator: *Allocator = std.heap.page_allocator, + backing_allocator: Allocator = std.heap.page_allocator, buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count, large_allocations: LargeAllocTable = .{}, empty_buckets: if (config.retain_metadata) ?*BucketHeader else void = @@ -284,6 +280,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } }; + pub fn allocator(self: *Self) Allocator { + return Allocator.init(self, alloc, resize, free); + } + fn bucketStackTrace( bucket: *BucketHeader, size_class: usize, @@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var it = self.large_allocations.iterator(); while (it.next()) |large| { if (large.value_ptr.freed) { - _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; + self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.ptr_align, @returnAddress()); } } } @@ -517,7 +517,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { new_size: usize, len_align: u29, ret_addr: usize, - ) Error!usize { + ) ?usize { const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse { if (config.safety) { @panic("Invalid free"); @@ -529,9 +529,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.retain_metadata and entry.value_ptr.freed) { if (config.safety) { reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free)); - // Recoverable if this is a free. - if (new_size == 0) - return @as(usize, 0); @panic("Unrecoverable double free"); } else { unreachable; @@ -555,12 +552,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { // Do memory limit accounting with requested sizes rather than what backing_allocator returns // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and - // that is impossible to guarantee after calling backing_allocator.resizeFn. + // that is impossible to guarantee after calling backing_allocator.rawResize. 
const prev_req_bytes = self.total_requested_bytes; if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size; if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) { - return error.OutOfMemory; + return null; } self.total_requested_bytes = new_req_bytes; } @@ -568,29 +565,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.total_requested_bytes = prev_req_bytes; }; - const result_len = if (config.never_unmap and new_size == 0) - 0 - else - try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr); + const result_len = self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr) orelse return null; if (config.enable_memory_limit) { entry.value_ptr.requested_size = new_size; } - if (result_len == 0) { - if (config.verbose_log) { - log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); - } - - if (!config.retain_metadata) { - assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr))); - } else { - entry.value_ptr.freed = true; - entry.value_ptr.captureStackTrace(ret_addr, .free); - } - return 0; - } - if (config.verbose_log) { log.info("large resize {d} bytes at {*} to {d}", .{ old_mem.len, old_mem.ptr, new_size, @@ -601,20 +581,76 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return result_len; } + /// This function assumes the object is in the large object storage regardless + /// of the parameters. + fn freeLarge( + self: *Self, + old_mem: []u8, + old_align: u29, + ret_addr: usize, + ) void { + _ = old_align; + + const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse { + if (config.safety) { + @panic("Invalid free"); + } else { + unreachable; + } + }; + + if (config.retain_metadata and entry.value_ptr.freed) { + if (config.safety) { + reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free)); + return; + } else { + unreachable; + } + } + + if (config.safety and old_mem.len != entry.value_ptr.bytes.len) { + var addresses: [stack_n]usize = [1]usize{0} ** stack_n; + var free_stack_trace = StackTrace{ + .instruction_addresses = &addresses, + .index = 0, + }; + std.debug.captureStackTrace(ret_addr, &free_stack_trace); + log.err("Allocation size {d} bytes does not match free size {d}. 
Allocation: {s} Free: {s}", .{ + entry.value_ptr.bytes.len, + old_mem.len, + entry.value_ptr.getStackTrace(.alloc), + free_stack_trace, + }); + } + + if (config.enable_memory_limit) { + self.total_requested_bytes -= entry.value_ptr.requested_size; + } + + if (config.verbose_log) { + log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); + } + + if (!config.retain_metadata) { + assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr))); + } else { + entry.value_ptr.freed = true; + entry.value_ptr.captureStackTrace(ret_addr, .free); + } + } + pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void { self.requested_memory_limit = limit; } fn resize( - allocator: *Allocator, + self: *Self, old_mem: []u8, old_align: u29, new_size: usize, len_align: u29, ret_addr: usize, - ) Error!usize { - const self = @fieldParentPtr(Self, "allocator", allocator); - + ) ?usize { self.mutex.lock(); defer self.mutex.unlock(); @@ -658,9 +694,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); - // Recoverable if this is a free. - if (new_size == 0) - return @as(usize, 0); @panic("Unrecoverable double free"); } else { unreachable; @@ -672,7 +705,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - old_mem.len; if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) { - return error.OutOfMemory; + return null; } self.total_requested_bytes = new_req_bytes; } @@ -680,52 +713,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.total_requested_bytes = prev_req_bytes; }; - if (new_size == 0) { - // Capture stack trace to be the "first free", in case a double free happens. 
- bucket.captureStackTrace(ret_addr, size_class, slot_index, .free); - - used_byte.* &= ~(@as(u8, 1) << used_bit_index); - bucket.used_count -= 1; - if (bucket.used_count == 0) { - if (bucket.next == bucket) { - // it's the only bucket and therefore the current one - self.buckets[bucket_index] = null; - } else { - bucket.next.prev = bucket.prev; - bucket.prev.next = bucket.next; - self.buckets[bucket_index] = bucket.prev; - } - if (!config.never_unmap) { - self.backing_allocator.free(bucket.page[0..page_size]); - } - if (!config.retain_metadata) { - self.freeBucket(bucket, size_class); - } else { - // move alloc_cursor to end so we can tell size_class later - const slot_count = @divExact(page_size, size_class); - bucket.alloc_cursor = @truncate(SlotIndex, slot_count); - if (self.empty_buckets) |prev_bucket| { - // empty_buckets is ordered newest to oldest through prev so that if - // config.never_unmap is false and backing_allocator reuses freed memory - // then searchBuckets will always return the newer, relevant bucket - bucket.prev = prev_bucket; - bucket.next = prev_bucket.next; - prev_bucket.next = bucket; - bucket.next.prev = bucket; - } else { - bucket.prev = bucket; - bucket.next = bucket; - } - self.empty_buckets = bucket; - } - } else { - @memset(old_mem.ptr, undefined, old_mem.len); - } - if (config.verbose_log) { - log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); - } - return @as(usize, 0); - } const new_aligned_size = math.max(new_size, old_align); const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size); if (new_size_class <= size_class) { @@ -739,7 +726,115 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } return new_size; } - return error.OutOfMemory; + return null; + } + + fn free( + self: *Self, + old_mem: []u8, + old_align: u29, + ret_addr: usize, + ) void { + self.mutex.lock(); + defer self.mutex.unlock(); + + assert(old_mem.len != 0); + + const aligned_size = math.max(old_mem.len, old_align); + if (aligned_size > largest_bucket_object_size) { + self.freeLarge(old_mem, old_align, ret_addr); + return; + } + const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size); + + var bucket_index = math.log2(size_class_hint); + var size_class: usize = size_class_hint; + const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) { + if (searchBucket(self.buckets[bucket_index], @ptrToInt(old_mem.ptr))) |bucket| { + // move bucket to head of list to optimize search for nearby allocations + self.buckets[bucket_index] = bucket; + break bucket; + } + size_class *= 2; + } else blk: { + if (config.retain_metadata) { + if (!self.large_allocations.contains(@ptrToInt(old_mem.ptr))) { + // object not in active buckets or a large allocation, so search empty buckets + if (searchBucket(self.empty_buckets, @ptrToInt(old_mem.ptr))) |bucket| { + // bucket is empty so is_used below will always be false and we exit there + break :blk bucket; + } else { + @panic("Invalid free"); + } + } + } + self.freeLarge(old_mem, old_align, ret_addr); + return; + }; + const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page); + const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const used_byte_index = slot_index / 8; + const used_bit_index = @intCast(u3, slot_index % 8); + const used_byte = bucket.usedBits(used_byte_index); + const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + if (!is_used) { + if (config.safety) { + reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, 
slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); + // Recoverable if this is a free. + return; + } else { + unreachable; + } + } + + // Definitely an in-use small alloc now. + if (config.enable_memory_limit) { + self.total_requested_bytes -= old_mem.len; + } + + // Capture stack trace to be the "first free", in case a double free happens. + bucket.captureStackTrace(ret_addr, size_class, slot_index, .free); + + used_byte.* &= ~(@as(u8, 1) << used_bit_index); + bucket.used_count -= 1; + if (bucket.used_count == 0) { + if (bucket.next == bucket) { + // it's the only bucket and therefore the current one + self.buckets[bucket_index] = null; + } else { + bucket.next.prev = bucket.prev; + bucket.prev.next = bucket.next; + self.buckets[bucket_index] = bucket.prev; + } + if (!config.never_unmap) { + self.backing_allocator.free(bucket.page[0..page_size]); + } + if (!config.retain_metadata) { + self.freeBucket(bucket, size_class); + } else { + // move alloc_cursor to end so we can tell size_class later + const slot_count = @divExact(page_size, size_class); + bucket.alloc_cursor = @truncate(SlotIndex, slot_count); + if (self.empty_buckets) |prev_bucket| { + // empty_buckets is ordered newest to oldest through prev so that if + // config.never_unmap is false and backing_allocator reuses freed memory + // then searchBuckets will always return the newer, relevant bucket + bucket.prev = prev_bucket; + bucket.next = prev_bucket.next; + prev_bucket.next = bucket; + bucket.next.prev = bucket; + } else { + bucket.prev = bucket; + bucket.next = bucket; + } + self.empty_buckets = bucket; + } + } else { + @memset(old_mem.ptr, undefined, old_mem.len); + } + if (config.verbose_log) { + log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); + } } // Returns true if an allocation of `size` bytes is within the specified @@ -755,9 +850,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return true; } - fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { - const self = @fieldParentPtr(Self, "allocator", allocator); - + fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { self.mutex.lock(); defer self.mutex.unlock(); @@ -768,7 +861,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const new_aligned_size = math.max(len, ptr_align); if (new_aligned_size > largest_bucket_object_size) { try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); - const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr); + const slice = try self.backing_allocator.rawAlloc(len, ptr_align, len_align, ret_addr); const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr)); if (config.retain_metadata and !config.never_unmap) { @@ -834,7 +927,7 @@ const test_config = Config{}; test "small allocations - free in same order" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var list = std.ArrayList(*u64).init(std.testing.allocator); defer list.deinit(); @@ -853,7 +946,7 @@ test "small allocations - free in same order" { test "small allocations - free in reverse order" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); 
var list = std.ArrayList(*u64).init(std.testing.allocator); defer list.deinit(); @@ -872,7 +965,7 @@ test "small allocations - free in reverse order" { test "large allocations" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); const ptr1 = try allocator.alloc(u64, 42768); const ptr2 = try allocator.alloc(u64, 52768); @@ -885,7 +978,7 @@ test "large allocations" { test "realloc" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1); defer allocator.free(slice); @@ -907,7 +1000,7 @@ test "realloc" { test "shrink" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, 20); defer allocator.free(slice); @@ -930,7 +1023,7 @@ test "shrink" { test "large object - grow" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice1 = try allocator.alloc(u8, page_size * 2 - 20); defer allocator.free(slice1); @@ -948,7 +1041,7 @@ test "large object - grow" { test "realloc small object to large object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, 70); defer allocator.free(slice); @@ -965,14 +1058,14 @@ test "realloc small object to large object" { test "shrink large object to large object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); slice[0] = 0x12; slice[60] = 0x34; - slice = try allocator.resize(slice, page_size * 2 + 1); + slice = allocator.resize(slice, page_size * 2 + 1) orelse return; try std.testing.expect(slice[0] == 0x12); try std.testing.expect(slice[60] == 0x34); @@ -988,10 +1081,10 @@ test "shrink large object to large object" { test "shrink large object to large object with larger alignment" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator; + const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator(); const alloc_size = page_size * 2 + 50; var slice = try allocator.alignedAlloc(u8, 16, alloc_size); @@ -1023,7 +1116,7 @@ test "shrink large object to large object with larger alignment" { test "realloc large object to small object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); @@ -1041,7 +1134,7 @@ test "overrideable mutexes" { .mutex = std.Thread.Mutex{}, }; 
defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); const ptr = try allocator.create(i32); defer allocator.destroy(ptr); @@ -1050,7 +1143,7 @@ test "overrideable mutexes" { test "non-page-allocator backing allocator" { var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); const ptr = try allocator.create(i32); defer allocator.destroy(ptr); @@ -1059,10 +1152,10 @@ test "non-page-allocator backing allocator" { test "realloc large object to larger alignment" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator; + const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator(); var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50); defer allocator.free(slice); @@ -1098,9 +1191,9 @@ test "realloc large object to larger alignment" { test "large object shrinks to small but allocation fails during shrink" { var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3); - var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator }; + var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); @@ -1117,7 +1210,7 @@ test "large object shrinks to small but allocation fails during shrink" { test "objects of size 1024 and 2048" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); const slice = try allocator.alloc(u8, 1025); const slice2 = try allocator.alloc(u8, 3000); @@ -1129,7 +1222,7 @@ test "objects of size 1024 and 2048" { test "setting a memory cap" { var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); gpa.setRequestedMemoryLimit(1010); @@ -1158,9 +1251,9 @@ test "double frees" { defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak"); const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); - var gpa = GPA{ .backing_allocator = &backing_gpa.allocator }; + var gpa = GPA{ .backing_allocator = backing_gpa.allocator() }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = &gpa.allocator; + const allocator = gpa.allocator(); // detect a small allocation double free, even though bucket is emptied const index: usize = 6; @@ -1195,10 +1288,12 @@ test "double frees" { test "bug 9995 fix, large allocs count requested size not backing size" { // with AtLeast, buffer likely to be larger than requested, especially when shrinking var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; - var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least); + const allocator = 
gpa.allocator(); + + var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least); try std.testing.expect(gpa.total_requested_bytes == page_size + 1); - buf = try gpa.allocator.reallocAtLeast(buf, 1); + buf = try allocator.reallocAtLeast(buf, 1); try std.testing.expect(gpa.total_requested_bytes == 1); - buf = try gpa.allocator.reallocAtLeast(buf, 2); + buf = try allocator.reallocAtLeast(buf, 2); try std.testing.expect(gpa.total_requested_bytes == 2); } diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index cf9c4162a7..c63c1a826f 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -5,33 +5,31 @@ const Allocator = std.mem.Allocator; /// on every call to the allocator. Writer errors are ignored. pub fn LogToWriterAllocator(comptime Writer: type) type { return struct { - allocator: Allocator, - parent_allocator: *Allocator, + parent_allocator: Allocator, writer: Writer, const Self = @This(); - pub fn init(parent_allocator: *Allocator, writer: Writer) Self { + pub fn init(parent_allocator: Allocator, writer: Writer) Self { return Self{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, .parent_allocator = parent_allocator, .writer = writer, }; } + pub fn allocator(self: *Self) Allocator { + return Allocator.init(self, alloc, resize, free); + } + fn alloc( - allocator: *Allocator, + self: *Self, len: usize, ptr_align: u29, len_align: u29, ra: usize, ) error{OutOfMemory}![]u8 { - const self = @fieldParentPtr(Self, "allocator", allocator); self.writer.print("alloc : {}", .{len}) catch {}; - const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra); + const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra); if (result) |_| { self.writer.print(" success!\n", .{}) catch {}; } else |_| { @@ -41,31 +39,39 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { } fn resize( - allocator: *Allocator, + self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { - const self = @fieldParentPtr(Self, "allocator", allocator); - if (new_len == 0) { - self.writer.print("free : {}\n", .{buf.len}) catch {}; - } else if (new_len <= buf.len) { + ) ?usize { + if (new_len <= buf.len) { self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } - if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| { + + if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len > buf.len) { self.writer.print(" success!\n", .{}) catch {}; } return resized_len; - } else |e| { - std.debug.assert(new_len > buf.len); - self.writer.print(" failure!\n", .{}) catch {}; - return e; } + + std.debug.assert(new_len > buf.len); + self.writer.print(" failure!\n", .{}) catch {}; + return null; + } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ra: usize, + ) void { + self.writer.print("free : {}\n", .{buf.len}) catch {}; + self.parent_allocator.rawFree(buf, buf_align, ra); } }; } @@ -73,7 +79,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { /// This allocator is used in front of another allocator and logs to the provided writer /// on every call to the allocator. Writer errors are ignored. 
pub fn logToWriterAllocator( - parent_allocator: *Allocator, + parent_allocator: Allocator, writer: anytype, ) LogToWriterAllocator(@TypeOf(writer)) { return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer); @@ -85,12 +91,12 @@ test "LogToWriterAllocator" { var allocator_buf: [10]u8 = undefined; var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); - const allocator = &logToWriterAllocator(&fixedBufferAllocator.allocator, fbs.writer()).allocator; + const allocator = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer()).allocator(); var a = try allocator.alloc(u8, 10); a = allocator.shrink(a, 5); try std.testing.expect(a.len == 5); - try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20)); + try std.testing.expect(allocator.resize(a, 20) == null); allocator.free(a); try std.testing.expectEqualSlices(u8, diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 0c6224b7ce..0bd0755cfc 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -22,21 +22,20 @@ pub fn ScopedLoggingAllocator( const log = std.log.scoped(scope); return struct { - allocator: Allocator, - parent_allocator: *Allocator, + parent_allocator: Allocator, const Self = @This(); - pub fn init(parent_allocator: *Allocator) Self { + pub fn init(parent_allocator: Allocator) Self { return .{ - .allocator = Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, .parent_allocator = parent_allocator, }; } + pub fn allocator(self: *Self) Allocator { + return Allocator.init(self, alloc, resize, free); + } + // This function is required as the `std.log.log` function is not public inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void { switch (log_level) { @@ -48,14 +47,13 @@ pub fn ScopedLoggingAllocator( } fn alloc( - allocator: *Allocator, + self: *Self, len: usize, ptr_align: u29, len_align: u29, ra: usize, ) error{OutOfMemory}![]u8 { - const self = @fieldParentPtr(Self, "allocator", allocator); - const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra); + const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra); if (result) |_| { logHelper( success_log_level, @@ -73,19 +71,15 @@ pub fn ScopedLoggingAllocator( } fn resize( - allocator: *Allocator, + self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { - const self = @fieldParentPtr(Self, "allocator", allocator); - - if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| { - if (new_len == 0) { - logHelper(success_log_level, "free - success - len: {}", .{buf.len}); - } else if (new_len <= buf.len) { + ) ?usize { + if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (new_len <= buf.len) { logHelper( success_log_level, "shrink - success - {} to {}, len_align: {}, buf_align: {}", @@ -100,15 +94,25 @@ pub fn ScopedLoggingAllocator( } return resized_len; - } else |err| { - std.debug.assert(new_len > buf.len); - logHelper( - failure_log_level, - "expand - failure: {s} - {} to {}, len_align: {}, buf_align: {}", - .{ @errorName(err), buf.len, new_len, len_align, buf_align }, - ); - return err; } + + std.debug.assert(new_len > buf.len); + logHelper( + failure_log_level, + "expand - failure - {} to {}, len_align: {}, buf_align: {}", + .{ buf.len, new_len, 
len_align, buf_align }, + ); + return null; + } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ra: usize, + ) void { + self.parent_allocator.rawFree(buf, buf_align, ra); + logHelper(success_log_level, "free - len: {}", .{buf.len}); } }; } @@ -116,6 +120,6 @@ pub fn ScopedLoggingAllocator( /// This allocator is used in front of another allocator and logs to `std.log` /// on every call to the allocator. /// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator` -pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) { +pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) { return LoggingAllocator(.debug, .err).init(parent_allocator); } diff --git a/lib/std/io/buffered_atomic_file.zig b/lib/std/io/buffered_atomic_file.zig index 5b27ba78f1..71edabb20a 100644 --- a/lib/std/io/buffered_atomic_file.zig +++ b/lib/std/io/buffered_atomic_file.zig @@ -7,7 +7,7 @@ pub const BufferedAtomicFile = struct { atomic_file: fs.AtomicFile, file_writer: File.Writer, buffered_writer: BufferedWriter, - allocator: *mem.Allocator, + allocator: mem.Allocator, pub const buffer_size = 4096; pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer); @@ -16,7 +16,7 @@ pub const BufferedAtomicFile = struct { /// TODO when https://github.com/ziglang/zig/issues/2761 is solved /// this API will not need an allocator pub fn create( - allocator: *mem.Allocator, + allocator: mem.Allocator, dir: fs.Dir, dest_path: []const u8, atomic_file_options: fs.Dir.AtomicFileOptions, diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig index c77052f975..8779e22250 100644 --- a/lib/std/io/peek_stream.zig +++ b/lib/std/io/peek_stream.zig @@ -38,7 +38,7 @@ pub fn PeekStream( } }, .Dynamic => struct { - pub fn init(base: ReaderType, allocator: *mem.Allocator) Self { + pub fn init(base: ReaderType, allocator: mem.Allocator) Self { return .{ .unbuffered_reader = base, .fifo = FifoType.init(allocator), diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 3da053e4fb..2839552668 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -88,7 +88,7 @@ pub fn Reader( /// memory would be greater than `max_size`, returns `error.StreamTooLong`. /// Caller owns returned memory. /// If this function returns an error, the contents from the stream read so far are lost. - pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 { + pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 { var array_list = std.ArrayList(u8).init(allocator); defer array_list.deinit(); try self.readAllArrayList(&array_list, max_size); @@ -127,7 +127,7 @@ pub fn Reader( /// If this function returns an error, the contents from the stream read so far are lost. pub fn readUntilDelimiterAlloc( self: Self, - allocator: *mem.Allocator, + allocator: mem.Allocator, delimiter: u8, max_size: usize, ) ![]u8 { @@ -163,7 +163,7 @@ pub fn Reader( /// If this function returns an error, the contents from the stream read so far are lost. 
pub fn readUntilDelimiterOrEofAlloc( self: Self, - allocator: *mem.Allocator, + allocator: mem.Allocator, delimiter: u8, max_size: usize, ) !?[]u8 { diff --git a/lib/std/json.zig b/lib/std/json.zig index ff37bc4162..658fec6b79 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -1476,7 +1476,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool { } pub const ParseOptions = struct { - allocator: ?*Allocator = null, + allocator: ?Allocator = null, /// Behaviour when a duplicate field is encountered. duplicate_field_behavior: enum { @@ -2033,7 +2033,7 @@ test "parse into tagged union" { { // failing allocations should be bubbled up instantly without trying next member var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0); - const options = ParseOptions{ .allocator = &fail_alloc.allocator }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { // both fields here match the input string: []const u8, @@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" { test "parseFree descends into tagged union" { var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1); - const options = ParseOptions{ .allocator = &fail_alloc.allocator }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { int: i32, float: f64, @@ -2328,7 +2328,7 @@ test "parse into double recursive union definition" { /// A non-stream JSON parser which constructs a tree of Value's. pub const Parser = struct { - allocator: *Allocator, + allocator: Allocator, state: State, copy_strings: bool, // Stores parent nodes and un-combined Values. @@ -2341,7 +2341,7 @@ pub const Parser = struct { Simple, }; - pub fn init(allocator: *Allocator, copy_strings: bool) Parser { + pub fn init(allocator: Allocator, copy_strings: bool) Parser { return Parser{ .allocator = allocator, .state = .Simple, @@ -2364,9 +2364,10 @@ pub const Parser = struct { var arena = ArenaAllocator.init(p.allocator); errdefer arena.deinit(); + const allocator = arena.allocator(); while (try s.next()) |token| { - try p.transition(&arena.allocator, input, s.i - 1, token); + try p.transition(allocator, input, s.i - 1, token); } debug.assert(p.stack.items.len == 1); @@ -2379,7 +2380,7 @@ pub const Parser = struct { // Even though p.allocator exists, we take an explicit allocator so that allocation state // can be cleaned up on error correctly during a `parse` on call. 
- fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void { + fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void { switch (p.state) { .ObjectKey => switch (token) { .ObjectEnd => { @@ -2536,7 +2537,7 @@ pub const Parser = struct { } } - fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value { + fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value { const slice = s.slice(input, i); switch (s.escapes) { .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice }, @@ -2737,7 +2738,7 @@ test "write json then parse it" { try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello")); } -fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value { +fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value { var p = Parser.init(arena_allocator, false); return (try p.parse(json_str)).root; } @@ -2745,13 +2746,13 @@ fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value { test "parsing empty string gives appropriate error" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, "")); + try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.allocator(), "")); } test "integer after float has proper type" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - const json = try testParse(&arena_allocator.allocator, + const json = try testParse(arena_allocator.allocator(), \\{ \\ "float": 3.14, \\ "ints": [1, 2, 3] @@ -2786,7 +2787,7 @@ test "escaped characters" { \\} ; - const obj = (try testParse(&arena_allocator.allocator, input)).Object; + const obj = (try testParse(arena_allocator.allocator(), input)).Object; try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\"); try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/"); @@ -2812,11 +2813,12 @@ test "string copy option" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); + const allocator = arena_allocator.allocator(); - const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input); + const tree_nocopy = try Parser.init(allocator, false).parse(input); const obj_nocopy = tree_nocopy.root.Object; - const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input); + const tree_copy = try Parser.init(allocator, true).parse(input); const obj_copy = tree_copy.root.Object; for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| { diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index 61da6ec49b..3393f8a6ee 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -243,7 +243,7 @@ test "json write stream" { try w.beginObject(); try w.objectField("object"); - try w.emitJson(try getJsonObject(&arena_allocator.allocator)); + try w.emitJson(try getJsonObject(arena_allocator.allocator())); try w.objectField("string"); try w.emitString("This is a string"); @@ -286,7 +286,7 @@ test "json write stream" { try std.testing.expect(std.mem.eql(u8, expected, result)); } -fn getJsonObject(allocator: 
*std.mem.Allocator) !std.json.Value { +fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value { var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) }; try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) }); try value.Object.put("two", std.json.Value{ .Float = 2.0 }); diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index a8ad58be02..d7bcf9badc 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -142,7 +142,7 @@ pub const Mutable = struct { /// Asserts that the allocator owns the limbs memory. If this is not the case, /// use `toConst().toManaged()`. - pub fn toManaged(self: Mutable, allocator: *Allocator) Managed { + pub fn toManaged(self: Mutable, allocator: Allocator) Managed { return .{ .allocator = allocator, .limbs = self.limbs, @@ -283,7 +283,7 @@ pub const Mutable = struct { base: u8, value: []const u8, limbs_buffer: []Limb, - allocator: ?*Allocator, + allocator: ?Allocator, ) error{InvalidCharacter}!void { assert(base >= 2 and base <= 16); @@ -608,7 +608,7 @@ pub const Mutable = struct { /// rma is given by `a.limbs.len + b.limbs.len`. /// /// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`. - pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void { + pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?Allocator) void { var buf_index: usize = 0; const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { @@ -638,7 +638,7 @@ pub const Mutable = struct { /// /// If `allocator` is provided, it will be used for temporary storage to improve /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. - pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void { + pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?Allocator) void { assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing @@ -674,7 +674,7 @@ pub const Mutable = struct { signedness: Signedness, bit_count: usize, limbs_buffer: []Limb, - allocator: ?*Allocator, + allocator: ?Allocator, ) void { var buf_index: usize = 0; const req_limbs = calcTwosCompLimbCount(bit_count); @@ -714,7 +714,7 @@ pub const Mutable = struct { b: Const, signedness: Signedness, bit_count: usize, - allocator: ?*Allocator, + allocator: ?Allocator, ) void { assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing @@ -763,7 +763,7 @@ pub const Mutable = struct { /// /// If `allocator` is provided, it will be used for temporary storage to improve /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. - pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void { + pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?Allocator) void { _ = opt_allocator; assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing @@ -1660,7 +1660,7 @@ pub const Const = struct { positive: bool, /// The result is an independent resource which is managed by the caller. 
- pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed { + pub fn toManaged(self: Const, allocator: Allocator) Allocator.Error!Managed { const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len)); mem.copy(Limb, limbs, self.limbs); return Managed{ @@ -1873,7 +1873,7 @@ pub const Const = struct { /// Caller owns returned memory. /// Asserts that `base` is in the range [2, 16]. /// See also `toString`, a lower level function than this. - pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 { + pub fn toStringAlloc(self: Const, allocator: Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 { assert(base >= 2); assert(base <= 16); @@ -2092,7 +2092,7 @@ pub const Managed = struct { pub const default_capacity = 4; /// Allocator used by the Managed when requesting memory. - allocator: *Allocator, + allocator: Allocator, /// Raw digits. These are: /// @@ -2109,7 +2109,7 @@ pub const Managed = struct { /// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately. /// The integer value after initializing is `0`. - pub fn init(allocator: *Allocator) !Managed { + pub fn init(allocator: Allocator) !Managed { return initCapacity(allocator, default_capacity); } @@ -2131,7 +2131,7 @@ pub const Managed = struct { /// Creates a new `Managed` with value `value`. /// /// This is identical to an `init`, followed by a `set`. - pub fn initSet(allocator: *Allocator, value: anytype) !Managed { + pub fn initSet(allocator: Allocator, value: anytype) !Managed { var s = try Managed.init(allocator); try s.set(value); return s; @@ -2140,7 +2140,7 @@ pub const Managed = struct { /// Creates a new Managed with a specific capacity. If capacity < default_capacity then the /// default capacity will be used instead. /// The integer value after initializing is `0`. - pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed { + pub fn initCapacity(allocator: Allocator, capacity: usize) !Managed { return Managed{ .allocator = allocator, .metadata = 1, @@ -2206,7 +2206,7 @@ pub const Managed = struct { return other.cloneWithDifferentAllocator(other.allocator); } - pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed { + pub fn cloneWithDifferentAllocator(other: Managed, allocator: Allocator) !Managed { return Managed{ .allocator = allocator, .metadata = other.metadata, @@ -2347,7 +2347,7 @@ pub const Managed = struct { /// Converts self to a string in the requested base. Memory is allocated from the provided /// allocator and not the one present in self. - pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 { + pub fn toString(self: Managed, allocator: Allocator, base: u8, case: std.fmt.Case) ![]u8 { _ = allocator; if (base < 2 or base > 16) return error.InvalidBase; return self.toConst().toStringAlloc(self.allocator, base, case); @@ -2784,7 +2784,7 @@ const AccOp = enum { /// r MUST NOT alias any of a or b. /// /// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs. 
-fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void { +fn llmulacc(comptime op: AccOp, opt_allocator: ?Allocator, r: []Limb, a: []const Limb, b: []const Limb) void { @setRuntimeSafety(debug_safety); assert(r.len >= a.len); assert(r.len >= b.len); @@ -2819,7 +2819,7 @@ fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []cons /// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs. fn llmulaccKaratsuba( comptime op: AccOp, - allocator: *Allocator, + allocator: Allocator, r: []Limb, a: []const Limb, b: []const Limb, diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index 1f66417496..de6804ca01 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -29,7 +29,7 @@ pub const Rational = struct { /// Create a new Rational. A small amount of memory will be allocated on initialization. /// This will be 2 * Int.default_capacity. - pub fn init(a: *Allocator) !Rational { + pub fn init(a: Allocator) !Rational { return Rational{ .p = try Int.init(a), .q = try Int.initSet(a, 1), diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 0390733b3d..c310835b61 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -37,24 +37,26 @@ pub const Allocator = @import("mem/Allocator.zig"); pub fn ValidationAllocator(comptime T: type) type { return struct { const Self = @This(); - allocator: Allocator, + underlying_allocator: T, - pub fn init(allocator: T) @This() { + + pub fn init(underlying_allocator: T) @This() { return .{ - .allocator = .{ - .allocFn = alloc, - .resizeFn = resize, - }, - .underlying_allocator = allocator, + .underlying_allocator = underlying_allocator, }; } - fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { - if (T == *Allocator) return self.underlying_allocator; - if (*T == *Allocator) return &self.underlying_allocator; - return &self.underlying_allocator.allocator; + + pub fn allocator(self: *Self) Allocator { + return Allocator.init(self, alloc, resize, free); } + + fn getUnderlyingAllocatorPtr(self: *Self) Allocator { + if (T == Allocator) return self.underlying_allocator; + return self.underlying_allocator.allocator(); + } + pub fn alloc( - allocator: *Allocator, + self: *Self, n: usize, ptr_align: u29, len_align: u29, @@ -67,9 +69,8 @@ pub fn ValidationAllocator(comptime T: type) type { assert(n >= len_align); } - const self = @fieldParentPtr(@This(), "allocator", allocator); const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr); + const result = try underlying.rawAlloc(n, ptr_align, len_align, ret_addr); assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); if (len_align == 0) { assert(result.len == n); @@ -79,22 +80,22 @@ pub fn ValidationAllocator(comptime T: type) type { } return result; } + pub fn resize( - allocator: *Allocator, + self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize, - ) Allocator.Error!usize { + ) ?usize { assert(buf.len > 0); if (len_align != 0) { assert(mem.isAlignedAnyAlign(new_len, len_align)); assert(new_len >= len_align); } - const self = @fieldParentPtr(@This(), "allocator", allocator); const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr); + const result = underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse return null; if (len_align == 0) { 
assert(result == new_len); } else { @@ -103,7 +104,20 @@ pub fn ValidationAllocator(comptime T: type) type { } return result; } - pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { + + pub fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf_align; + _ = ret_addr; + assert(buf.len > 0); + } + + pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct { pub fn reset(self: *Self) void { self.underlying_allocator.reset(); } @@ -130,12 +144,18 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { return adjusted; } -var failAllocator = Allocator{ - .allocFn = failAllocatorAlloc, - .resizeFn = Allocator.noResize, +const fail_allocator = Allocator{ + .ptr = undefined, + .vtable = &failAllocator_vtable, +}; + +const failAllocator_vtable = Allocator.VTable{ + .alloc = failAllocatorAlloc, + .resize = Allocator.NoResize(c_void).noResize, + .free = Allocator.NoOpFree(c_void).noOpFree, }; -fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 { - _ = self; + +fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 { _ = n; _ = alignment; _ = len_align; @@ -144,8 +164,8 @@ fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29 } test "mem.Allocator basics" { - try testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1)); - try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0)); + try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1)); + try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0)); } test "Allocator.resize" { @@ -168,7 +188,7 @@ test "Allocator.resize" { defer testing.allocator.free(values); for (values) |*v, i| v.* = @intCast(T, i); - values = try testing.allocator.resize(values, values.len + 10); + values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory; try testing.expect(values.len == 110); } @@ -183,7 +203,7 @@ test "Allocator.resize" { defer testing.allocator.free(values); for (values) |*v, i| v.* = @intToFloat(T, i); - values = try testing.allocator.resize(values, values.len + 10); + values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory; try testing.expect(values.len == 110); } } @@ -1786,18 +1806,18 @@ pub fn SplitIterator(comptime T: type) type { /// Naively combines a series of slices with a separator. /// Allocates memory for the result, which must be freed by the caller. -pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 { +pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 { return joinMaybeZ(allocator, separator, slices, false); } /// Naively combines a series of slices with a separator and null terminator. /// Allocates memory for the result, which must be freed by the caller. -pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 { +pub fn joinZ(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 { const out = try joinMaybeZ(allocator, separator, slices, true); return out[0 .. 
out.len - 1 :0]; } -fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 { +fn joinMaybeZ(allocator: Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 { if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{}; const total_len = blk: { @@ -1876,7 +1896,7 @@ test "mem.joinZ" { } /// Copies each T from slices into a new slice that exactly holds all the elements. -pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T { +pub fn concat(allocator: Allocator, comptime T: type, slices: []const []const T) ![]T { if (slices.len == 0) return &[0]T{}; const total_len = blk: { @@ -2318,7 +2338,7 @@ test "replacementSize" { } /// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory. -pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T { +pub fn replaceOwned(comptime T: type, allocator: Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T { var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement)); _ = replace(T, input, needle, replacement, output); return output; diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index a3c0995496..29fbf7c2c1 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -5,155 +5,168 @@ const assert = std.debug.assert; const math = std.math; const mem = std.mem; const Allocator = @This(); +const builtin = @import("builtin"); pub const Error = error{OutOfMemory}; -/// Attempt to allocate at least `len` bytes aligned to `ptr_align`. -/// -/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, -/// otherwise, the length must be aligned to `len_align`. -/// -/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`. -/// -/// `ret_addr` is optionally provided as the first return address of the allocation call stack. -/// If the value is `0` it means no return address has been provided. -allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, +// The type erased pointer to the allocator implementation +ptr: *c_void, +vtable: *const VTable, + +pub const VTable = struct { + /// Attempt to allocate at least `len` bytes aligned to `ptr_align`. + /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. + /// + /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. + alloc: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, + + /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent + /// length returned by `alloc` or `resize`. `buf_align` must equal the same value + /// that was passed as the `ptr_align` parameter to the original `alloc` call. + /// + /// `null` can only be returned if `new_len` is greater than `buf.len`. + /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be + /// unmodified and `null` MUST be returned. 
+ /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not* + /// provide a way to modify the alignment of a pointer. Rather it provides an API for + /// accepting more bytes of memory from the allocator than requested. + /// + /// `new_len` must be greater than zero, greater than or equal to `len_align` and must be aligned by `len_align`. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. + resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize, + + /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`. + /// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. + free: fn (ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void, +}; + +pub fn init( + pointer: anytype, + comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, + comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize, + comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void, +) Allocator { + const Ptr = @TypeOf(pointer); + const ptr_info = @typeInfo(Ptr); + + assert(ptr_info == .Pointer); // Must be a pointer + assert(ptr_info.Pointer.size == .One); // Must be a single-item pointer + + const alignment = ptr_info.Pointer.alignment; + + const gen = struct { + fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { + const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr }); + } + fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { + assert(new_len != 0); + const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr }); + } + fn free(ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void { + const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + @call(.{ .modifier = .always_inline }, freeFn, .{ self, buf, buf_align, ret_addr }); + } + }; -/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent -/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value -/// that was passed as the `ptr_align` parameter to the original `allocFn` call. -/// -/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no -/// longer be passed to `resizeFn`. -/// -/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. -/// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be -/// unmodified and error.OutOfMemory MUST be returned. -/// -/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, -/// otherwise, the length must be aligned to `len_align`. 
Note that `len_align` does *not* -/// provide a way to modify the alignment of a pointer. Rather it provides an API for -/// accepting more bytes of memory from the allocator than requested. -/// -/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`. -/// -/// `ret_addr` is optionally provided as the first return address of the allocation call stack. -/// If the value is `0` it means no return address has been provided. -resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, + const vtable = VTable{ + .alloc = gen.alloc, + .resize = gen.resize, + .free = gen.free, + }; -/// Set to resizeFn if in-place resize is not supported. -pub fn noResize( - self: *Allocator, - buf: []u8, - buf_align: u29, - new_len: usize, - len_align: u29, - ret_addr: usize, -) Error!usize { - _ = self; - _ = buf_align; - _ = len_align; - _ = ret_addr; - if (new_len > buf.len) - return error.OutOfMemory; - return new_len; + return .{ + .ptr = pointer, + .vtable = &vtable, + }; } -/// Realloc is used to modify the size or alignment of an existing allocation, -/// as well as to provide the allocator with an opportunity to move an allocation -/// to a better location. -/// When the size/alignment is greater than the previous allocation, this function -/// returns `error.OutOfMemory` when the requested new allocation could not be granted. -/// When the size/alignment is less than or equal to the previous allocation, -/// this function returns `error.OutOfMemory` when the allocator decides the client -/// would be better off keeping the extra alignment/size. Clients will call -/// `resizeFn` when they require the allocator to track a new alignment/size, -/// and so this function should only return success when the allocator considers -/// the reallocation desirable from the allocator's perspective. -/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle -/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator` -/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment -/// is less than or equal to the old allocation, because it cannot reclaim the memory, -/// and thus the `std.ArrayList` would be better off retaining its capacity. -/// When `reallocFn` returns, -/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same -/// as `old_mem` was when `reallocFn` is called. The bytes of -/// `return_value[old_mem.len..]` have undefined values. -/// The returned slice must have its pointer aligned at least to `new_alignment` bytes. -pub fn reallocBytes( - self: *Allocator, - /// Guaranteed to be the same as what was returned from most recent call to - /// `allocFn` or `resizeFn`. - /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` - /// is guaranteed to be >= 1. - old_mem: []u8, - /// If `old_mem.len == 0` then this is `undefined`, otherwise: - /// Guaranteed to be the same as what was passed to `allocFn`. - /// Guaranteed to be >= 1. - /// Guaranteed to be a power of 2. - old_alignment: u29, - /// If `new_byte_count` is 0 then this is a free and it is guaranteed that - /// `old_mem.len != 0`. - new_byte_count: usize, - /// Guaranteed to be >= 1. - /// Guaranteed to be a power of 2. - /// Returned slice's pointer must have this alignment. 
- new_alignment: u29, - /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly - /// non-zero means the length of the returned slice must be aligned by `len_align` - /// `new_len` must be aligned by `len_align` - len_align: u29, - return_address: usize, -) Error![]u8 { - if (old_mem.len == 0) { - const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address); - // TODO: https://github.com/ziglang/zig/issues/4298 - @memset(new_mem.ptr, undefined, new_byte_count); - return new_mem; - } +/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported. +pub fn NoResize(comptime AllocatorType: type) type { + return struct { + pub fn noResize( + self: *AllocatorType, + buf: []u8, + buf_align: u29, + new_len: usize, + len_align: u29, + ret_addr: usize, + ) ?usize { + _ = self; + _ = buf_align; + _ = len_align; + _ = ret_addr; + return if (new_len > buf.len) null else new_len; + } + }; +} - if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { - if (new_byte_count <= old_mem.len) { - const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address); - return old_mem.ptr[0..shrunk_len]; +/// Set freeFn to `NoOpFree(AllocatorType).noOpFree` if free is a no-op. +pub fn NoOpFree(comptime AllocatorType: type) type { + return struct { + pub fn noOpFree( + self: *AllocatorType, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf; + _ = buf_align; + _ = ret_addr; } - if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { - assert(resized_len >= new_byte_count); - // TODO: https://github.com/ziglang/zig/issues/4298 - @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); - return old_mem.ptr[0..resized_len]; - } else |_| {} - } - if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) { - return error.OutOfMemory; - } - return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address); + }; } -/// Move the given memory to a new location in the given allocator to accomodate a new -/// size and alignment. -fn moveBytes( - self: *Allocator, - old_mem: []u8, - old_align: u29, - new_len: usize, - new_alignment: u29, - len_align: u29, - return_address: usize, -) Error![]u8 { - assert(old_mem.len > 0); - assert(new_len > 0); - const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address); - @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len)); - // TODO https://github.com/ziglang/zig/issues/4298 - @memset(old_mem.ptr, undefined, old_mem.len); - _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address); - return new_mem; +/// Set freeFn to `PanicFree(AllocatorType).noOpFree` if free is not a supported operation. 
+pub fn PanicFree(comptime AllocatorType: type) type { + return struct { + pub fn noOpFree( + self: *AllocatorType, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf; + _ = buf_align; + _ = ret_addr; + @panic("free is not a supported operation for the allocator: " ++ @typeName(AllocatorType)); + } + }; +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { + return self.vtable.alloc(self.ptr, len, ptr_align, len_align, ret_addr); +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { + return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr); +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usize) void { + return self.vtable.free(self.ptr, buf, buf_align, ret_addr); } /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. -pub fn create(self: *Allocator, comptime T: type) Error!*T { +pub fn create(self: Allocator, comptime T: type) Error!*T { if (@sizeOf(T) == 0) return @as(*T, undefined); const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress()); return &slice[0]; @@ -161,12 +174,12 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T { /// `ptr` should be the return value of `create`, or otherwise /// have the same address and alignment property. -pub fn destroy(self: *Allocator, ptr: anytype) void { +pub fn destroy(self: Allocator, ptr: anytype) void { const info = @typeInfo(@TypeOf(ptr)).Pointer; const T = info.child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); - _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress()); + self.rawFree(non_const_ptr[0..@sizeOf(T)], info.alignment, @returnAddress()); } /// Allocates an array of `n` items of type `T` and sets all the @@ -177,12 +190,12 @@ pub fn destroy(self: *Allocator, ptr: anytype) void { /// call `free` when done. /// /// For allocating a single item, see `create`. -pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T { +pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T { return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress()); } pub fn allocWithOptions( - self: *Allocator, + self: Allocator, comptime Elem: type, n: usize, /// null means naturally aligned @@ -193,7 +206,7 @@ pub fn allocWithOptions( } pub fn allocWithOptionsRetAddr( - self: *Allocator, + self: Allocator, comptime Elem: type, n: usize, /// null means naturally aligned @@ -227,7 +240,7 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti /// /// For allocating a single item, see `create`. 
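As a point of reference for the hunks above: the interface now consists of a type-erased `ptr` plus a `vtable`, and implementations are expected to expose an `allocator()` method that wires themselves up through `Allocator.init` instead of embedding an `allocator: Allocator` field and recovering `self` with `@fieldParentPtr`. The following is a minimal illustrative sketch of that pattern, not code from this patch; the `CountingAllocator` name and its fields are hypothetical, but the shape mirrors the `ValidationAllocator` and `FailingAllocator` changes in this diff.

const std = @import("std");
const Allocator = std.mem.Allocator;

// Hypothetical wrapper that counts allocations while forwarding to a backing allocator.
const CountingAllocator = struct {
    backing: Allocator,
    count: usize = 0,

    // New convention: hand out an Allocator value built from a pointer to the
    // implementation plus its three member functions.
    pub fn allocator(self: *CountingAllocator) Allocator {
        return Allocator.init(self, alloc, resize, free);
    }

    fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        self.count += 1;
        return self.backing.rawAlloc(len, ptr_align, len_align, ret_addr);
    }

    fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
        return self.backing.rawResize(buf, buf_align, new_len, len_align, ret_addr);
    }

    fn free(self: *CountingAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
        self.backing.rawFree(buf, buf_align, ret_addr);
    }
};

Implementations that cannot usefully resize or free can plug in the `NoResize(...).noResize` and `NoOpFree(...).noOpFree` helpers added above, which is exactly what the new `fail_allocator` vtable does.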
pub fn allocSentinel( - self: *Allocator, + self: Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem, @@ -236,7 +249,7 @@ pub fn allocSentinel( } pub fn alignedAlloc( - self: *Allocator, + self: Allocator, comptime T: type, /// null means naturally aligned comptime alignment: ?u29, @@ -246,7 +259,7 @@ pub fn alignedAlloc( } pub fn allocAdvanced( - self: *Allocator, + self: Allocator, comptime T: type, /// null means naturally aligned comptime alignment: ?u29, @@ -259,7 +272,7 @@ pub fn allocAdvanced( pub const Exact = enum { exact, at_least }; pub fn allocAdvancedWithRetAddr( - self: *Allocator, + self: Allocator, comptime T: type, /// null means naturally aligned comptime alignment: ?u29, @@ -285,7 +298,7 @@ pub fn allocAdvancedWithRetAddr( .exact => 0, .at_least => size_of_T, }; - const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address); + const byte_slice = try self.rawAlloc(byte_count, a, len_align, return_address); switch (exact) { .exact => assert(byte_slice.len == byte_count), .at_least => assert(byte_slice.len >= byte_count), @@ -301,7 +314,7 @@ pub fn allocAdvancedWithRetAddr( } /// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer. -pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) { +pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) ?@TypeOf(old_mem) { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; const T = Slice.child; if (new_n == 0) { @@ -309,8 +322,8 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol return &[0]T{}; } const old_byte_slice = mem.sliceAsBytes(old_mem); - const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; - const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); + const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return null; + const rc = self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()) orelse return null; assert(rc == new_byte_count); const new_byte_slice = old_byte_slice.ptr[0..new_byte_count]; return mem.bytesAsSlice(T, new_byte_slice); @@ -326,7 +339,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol /// in `std.ArrayList.shrink`. /// If you need guaranteed success, call `shrink`. /// If `new_n` is 0, this is the same as `free` and it always succeeds. -pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: { +pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -334,7 +347,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: { return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress()); } -pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: { +pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t Error![]align(Slice.alignment) Slice.child; } { @@ -346,7 +359,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: { /// a new alignment, which can be larger, smaller, or the same as the old /// allocation. 
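Also visible above: the managed `Allocator.resize` wrapper now returns `?@TypeOf(old_mem)` instead of `Error!@TypeOf(old_mem)`, so an in-place growth failure is reported as `null`. A small caller-side sketch, mirroring the updated "Allocator.resize" test earlier in this diff:

const std = @import("std");

test "resize reports in-place failure with null" {
    const gpa = std.testing.allocator;
    var values = try gpa.alloc(u8, 100);
    defer gpa.free(values);
    // Previously: values = try gpa.resize(values, values.len + 10);
    values = gpa.resize(values, values.len + 10) orelse return error.OutOfMemory;
    try std.testing.expect(values.len == 110);
}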
pub fn reallocAdvanced( - self: *Allocator, + self: Allocator, old_mem: anytype, comptime new_alignment: u29, new_n: usize, @@ -356,7 +369,7 @@ pub fn reallocAdvanced( } pub fn reallocAdvancedWithRetAddr( - self: *Allocator, + self: Allocator, old_mem: anytype, comptime new_alignment: u29, new_n: usize, @@ -380,8 +393,31 @@ pub fn reallocAdvancedWithRetAddr( .exact => 0, .at_least => @sizeOf(T), }; - const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address); - return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice)); + + if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), new_alignment)) { + if (byte_count <= old_byte_slice.len) { + const shrunk_len = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, len_align, return_address); + return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..shrunk_len])); + } + + if (self.rawResize(old_byte_slice, Slice.alignment, byte_count, len_align, return_address)) |resized_len| { + // TODO: https://github.com/ziglang/zig/issues/4298 + @memset(old_byte_slice.ptr + byte_count, undefined, resized_len - byte_count); + return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..resized_len])); + } + } + + if (byte_count <= old_byte_slice.len and new_alignment <= Slice.alignment) { + return error.OutOfMemory; + } + + const new_mem = try self.rawAlloc(byte_count, new_alignment, len_align, return_address); + @memcpy(new_mem.ptr, old_byte_slice.ptr, math.min(byte_count, old_byte_slice.len)); + // TODO https://github.com/ziglang/zig/issues/4298 + @memset(old_byte_slice.ptr, undefined, old_byte_slice.len); + self.rawFree(old_byte_slice, Slice.alignment, return_address); + + return mem.bytesAsSlice(T, @alignCast(new_alignment, new_mem)); } /// Prefer calling realloc to shrink if you can tolerate failure, such as @@ -389,7 +425,7 @@ pub fn reallocAdvancedWithRetAddr( /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`. /// Returned slice has same alignment as old_mem. /// Shrinking to 0 is the same as calling `free`. -pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: { +pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; break :t []align(Slice.alignment) Slice.child; } { @@ -401,7 +437,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: { /// a new alignment, which must be smaller or the same as the old /// allocation. pub fn alignedShrink( - self: *Allocator, + self: Allocator, old_mem: anytype, comptime new_alignment: u29, new_n: usize, @@ -413,7 +449,7 @@ pub fn alignedShrink( /// the return address of the first stack frame, which may be relevant for /// allocators which collect stack traces. pub fn alignedShrinkWithRetAddr( - self: *Allocator, + self: Allocator, old_mem: anytype, comptime new_alignment: u29, new_n: usize, @@ -424,6 +460,11 @@ pub fn alignedShrinkWithRetAddr( if (new_n == old_mem.len) return old_mem; + if (new_n == 0) { + self.free(old_mem); + return @as([*]align(new_alignment) T, undefined)[0..0]; + } + assert(new_n < old_mem.len); assert(new_alignment <= Slice.alignment); @@ -440,7 +481,7 @@ pub fn alignedShrinkWithRetAddr( /// Free an array allocated with `alloc`. To free a single item, /// see `destroy`. 
-pub fn free(self: *Allocator, memory: anytype) void { +pub fn free(self: Allocator, memory: anytype) void { const Slice = @typeInfo(@TypeOf(memory)).Pointer; const bytes = mem.sliceAsBytes(memory); const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0; @@ -448,30 +489,30 @@ pub fn free(self: *Allocator, memory: anytype) void { const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr)); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(non_const_ptr, undefined, bytes_len); - _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress()); + self.rawFree(non_const_ptr[0..bytes_len], Slice.alignment, @returnAddress()); } /// Copies `m` to newly allocated memory. Caller owns the memory. -pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T { +pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T { const new_buf = try allocator.alloc(T, m.len); mem.copy(T, new_buf, m); return new_buf; } /// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory. -pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T { +pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T { const new_buf = try allocator.alloc(T, m.len + 1); mem.copy(T, new_buf, m); new_buf[m.len] = 0; return new_buf[0..m.len :0]; } -/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning -/// error.OutOfMemory should be impossible. +/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning +/// than a `null` return value should be impossible. /// This function allows a runtime `buf_align` value. Callers should generally prefer /// to call `shrink` directly. pub fn shrinkBytes( - self: *Allocator, + self: Allocator, buf: []u8, buf_align: u29, new_len: usize, @@ -479,5 +520,5 @@ pub fn shrinkBytes( return_address: usize, ) usize { assert(new_len <= buf.len); - return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable; + return self.rawResize(buf, buf_align, new_len, len_align, return_address) orelse unreachable; } diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 2e36eacd7f..a651076aba 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -59,7 +59,7 @@ pub fn MultiArrayList(comptime S: type) type { }; } - pub fn deinit(self: *Slice, gpa: *Allocator) void { + pub fn deinit(self: *Slice, gpa: Allocator) void { var other = self.toMultiArrayList(); other.deinit(gpa); self.* = undefined; @@ -106,7 +106,7 @@ pub fn MultiArrayList(comptime S: type) type { }; /// Release all allocated memory. - pub fn deinit(self: *Self, gpa: *Allocator) void { + pub fn deinit(self: *Self, gpa: Allocator) void { gpa.free(self.allocatedBytes()); self.* = undefined; } @@ -161,7 +161,7 @@ pub fn MultiArrayList(comptime S: type) type { } /// Extend the list by 1 element. Allocates more memory as necessary. - pub fn append(self: *Self, gpa: *Allocator, elem: S) !void { + pub fn append(self: *Self, gpa: Allocator, elem: S) !void { try self.ensureUnusedCapacity(gpa, 1); self.appendAssumeCapacity(elem); } @@ -188,7 +188,7 @@ pub fn MultiArrayList(comptime S: type) type { /// after and including the specified index back by one and /// sets the given index to the specified element. May reallocate /// and invalidate iterators. 
- pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void { + pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: S) void { try self.ensureUnusedCapacity(gpa, 1); self.insertAssumeCapacity(index, elem); } @@ -242,7 +242,7 @@ pub fn MultiArrayList(comptime S: type) type { /// Adjust the list's length to `new_len`. /// Does not initialize added items, if any. - pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void { + pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void { try self.ensureTotalCapacity(gpa, new_len); self.len = new_len; } @@ -250,7 +250,7 @@ pub fn MultiArrayList(comptime S: type) type { /// Attempt to reduce allocated capacity to `new_len`. /// If `new_len` is greater than zero, this may fail to reduce the capacity, /// but the data remains intact and the length is updated to new_len. - pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void { + pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void { if (new_len == 0) { gpa.free(self.allocatedBytes()); self.* = .{}; @@ -314,7 +314,7 @@ pub fn MultiArrayList(comptime S: type) type { /// Modify the array so that it can hold at least `new_capacity` items. /// Implements super-linear growth to achieve amortized O(1) append operations. /// Invalidates pointers if additional memory is needed. - pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void { + pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; @@ -328,14 +328,14 @@ pub fn MultiArrayList(comptime S: type) type { /// Modify the array so that it can hold at least `additional_count` **more** items. /// Invalidates pointers if additional memory is needed. - pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void { + pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void { return self.ensureTotalCapacity(gpa, self.len + additional_count); } /// Modify the array so that it can hold exactly `new_capacity` items. /// Invalidates pointers if additional memory is needed. /// `new_capacity` must be greater or equal to `len`. - pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void { + pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void { assert(new_capacity >= self.len); const new_bytes = try gpa.allocAdvanced( u8, @@ -372,7 +372,7 @@ pub fn MultiArrayList(comptime S: type) type { /// Create a copy of this list with a new backing store, /// using the specified allocator. - pub fn clone(self: Self, gpa: *Allocator) !Self { + pub fn clone(self: Self, gpa: Allocator) !Self { var result = Self{}; errdefer result.deinit(gpa); try result.ensureTotalCapacity(gpa, self.len); diff --git a/lib/std/net.zig b/lib/std/net.zig index 759adaa756..6199d739d7 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -664,7 +664,7 @@ pub const AddressList = struct { }; /// All memory allocated with `allocator` will be freed before this function returns. -pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream { +pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream { const list = try getAddressList(allocator, name, port); defer list.deinit(); @@ -699,12 +699,12 @@ pub fn tcpConnectToAddress(address: Address) !Stream { } /// Call `AddressList.deinit` on the result. 
-pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList { +pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList { const result = blk: { var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); - const result = try arena.allocator.create(AddressList); + const result = try arena.allocator().create(AddressList); result.* = AddressList{ .arena = arena, .addrs = undefined, @@ -712,7 +712,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !* }; break :blk result; }; - const arena = &result.arena.allocator; + const arena = result.arena.allocator(); errdefer result.arena.deinit(); if (builtin.target.os.tag == .windows or builtin.link_libc) { @@ -1303,7 +1303,7 @@ const ResolvConf = struct { /// Ignores lines longer than 512 bytes. /// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761 -fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void { +fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { rc.* = ResolvConf{ .ns = std.ArrayList(LookupAddr).init(allocator), .search = std.ArrayList(u8).init(allocator), diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig index 1742fb2947..f181bb49ea 100644 --- a/lib/std/net/test.zig +++ b/lib/std/net/test.zig @@ -230,7 +230,7 @@ test "listen on ipv4 try connect on ipv6 then ipv4" { try await client_frame; } -fn testClientToHost(allocator: *mem.Allocator, name: []const u8, port: u16) anyerror!void { +fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void { if (builtin.os.tag == .wasi) return error.SkipZigTest; const connection = try net.tcpConnectToHost(allocator, name, port); diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index d3c8d13bd1..3e6603677c 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -58,10 +58,11 @@ test "open smoke test" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); + const allocator = arena.allocator(); const base_path = blk: { - const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); - break :blk try fs.realpathAlloc(&arena.allocator, relative_path); + const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); + break :blk try fs.realpathAlloc(allocator, relative_path); }; var file_path: []u8 = undefined; @@ -69,34 +70,34 @@ test "open smoke test" { const mode: os.mode_t = if (native_os == .windows) 0 else 0o666; // Create some file using `open`. - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode); os.close(fd); // Try this again with the same flags. This op should fail with error.PathAlreadyExists. - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode)); // Try opening without `O.EXCL` flag. 
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode); os.close(fd); // Try opening as a directory which should fail. - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" }); try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode)); // Create some directory - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); try os.mkdir(file_path, mode); // Open dir using `open` - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode); os.close(fd); // Try opening as file which should fail. - file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); + file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" }); try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode)); } diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 26688d028e..0a484fed31 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct { ByteSize: u32, }; -fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 { +fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 { const num_words = try stream.readIntLittle(u32); var list = ArrayList(u32).init(allocator); errdefer list.deinit(); @@ -481,7 +481,7 @@ fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 { pub const Pdb = struct { in_file: File, msf: Msf, - allocator: *mem.Allocator, + allocator: mem.Allocator, string_table: ?*MsfStream, dbi: ?*MsfStream, modules: []Module, @@ -500,7 +500,7 @@ pub const Pdb = struct { checksum_offset: ?usize, }; - pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb { + pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb { const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking }); errdefer file.close(); @@ -858,7 +858,7 @@ const Msf = struct { directory: MsfStream, streams: []MsfStream, - fn init(allocator: *mem.Allocator, file: File) !Msf { + fn init(allocator: mem.Allocator, file: File) !Msf { const in = file.reader(); const superblock = try in.readStruct(SuperBlock); diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig index be81abd96c..289ad9480f 100644 --- a/lib/std/priority_dequeue.zig +++ b/lib/std/priority_dequeue.zig @@ -21,10 +21,10 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty items: []T, len: usize, - allocator: *Allocator, + allocator: Allocator, /// Initialize and return a new priority dequeue. - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return Self{ .items = &[_]T{}, .len = 0, @@ -336,7 +336,7 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty /// Dequeue takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// De-initialize with `deinit`. 
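The `std.net` and `std.os` test hunks above show the caller-side half of the migration: heap implementations no longer expose an `allocator` field, so `&arena.allocator` becomes `arena.allocator()`. A sketch of the updated arena pattern (the joined path components are only an example):

const std = @import("std");

test "arena usage after the interface change" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator(); // previously: &arena.allocator

    const joined = try std.fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp" });
    try std.testing.expect(joined.len != 0);
    // No per-allocation free is needed; arena.deinit() releases everything at once.
}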
- pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self { + pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self { var queue = Self{ .items = items, .len = items.len, @@ -945,7 +945,7 @@ fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void { } } -fn generateRandomSlice(allocator: *std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 { +fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 { var array = std.ArrayList(u32).init(allocator); try array.ensureTotalCapacity(size); diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig index 6d4b6634a4..1ae958f4e4 100644 --- a/lib/std/priority_queue.zig +++ b/lib/std/priority_queue.zig @@ -20,10 +20,10 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order items: []T, len: usize, - allocator: *Allocator, + allocator: Allocator, /// Initialize and return a priority queue. - pub fn init(allocator: *Allocator) Self { + pub fn init(allocator: Allocator) Self { return Self{ .items = &[_]T{}, .len = 0, @@ -153,7 +153,7 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order /// PriorityQueue takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit`. - pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self { + pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self { var queue = Self{ .items = items, .len = items.len, diff --git a/lib/std/process.zig b/lib/std/process.zig index 0e7b5b25ec..243978591b 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -21,7 +21,7 @@ pub fn getCwd(out_buffer: []u8) ![]u8 { } /// Caller must free the returned memory. -pub fn getCwdAlloc(allocator: *Allocator) ![]u8 { +pub fn getCwdAlloc(allocator: Allocator) ![]u8 { // The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit // in stack_buf, avoiding an extra allocation in the common case. var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined; @@ -54,7 +54,7 @@ test "getCwdAlloc" { } /// Caller owns resulting `BufMap`. -pub fn getEnvMap(allocator: *Allocator) !BufMap { +pub fn getEnvMap(allocator: Allocator) !BufMap { var result = BufMap.init(allocator); errdefer result.deinit(); @@ -154,7 +154,7 @@ pub const GetEnvVarOwnedError = error{ }; /// Caller must free returned memory. 
-pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 { +pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 { if (builtin.os.tag == .windows) { const result_w = blk: { const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key); @@ -183,10 +183,10 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool { } } -pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool { +pub fn hasEnvVar(allocator: Allocator, key: []const u8) error{OutOfMemory}!bool { if (builtin.os.tag == .windows) { var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator); - const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key); + const key_w = try std.unicode.utf8ToUtf16LeWithNull(stack_alloc.get(), key); defer stack_alloc.allocator.free(key_w); return std.os.getenvW(key_w) != null; } else { @@ -227,7 +227,7 @@ pub const ArgIteratorPosix = struct { }; pub const ArgIteratorWasi = struct { - allocator: *mem.Allocator, + allocator: mem.Allocator, index: usize, args: [][:0]u8, @@ -235,7 +235,7 @@ pub const ArgIteratorWasi = struct { /// You must call deinit to free the internal buffer of the /// iterator after you are done. - pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi { + pub fn init(allocator: mem.Allocator) InitError!ArgIteratorWasi { const fetched_args = try ArgIteratorWasi.internalInit(allocator); return ArgIteratorWasi{ .allocator = allocator, @@ -244,7 +244,7 @@ pub const ArgIteratorWasi = struct { }; } - fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 { + fn internalInit(allocator: mem.Allocator) InitError![][:0]u8 { const w = os.wasi; var count: usize = undefined; var buf_size: usize = undefined; @@ -325,7 +325,7 @@ pub const ArgIteratorWindows = struct { } /// You must free the returned memory when done. - pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) { + pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![:0]u8) { // march forward over whitespace while (true) : (self.index += 1) { const character = self.getPointAtIndex(); @@ -379,7 +379,7 @@ pub const ArgIteratorWindows = struct { } } - fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 { + fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![:0]u8 { var buf = std.ArrayList(u16).init(allocator); defer buf.deinit(); @@ -423,7 +423,7 @@ pub const ArgIteratorWindows = struct { } } - fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 { + fn convertFromWindowsCmdLineToUTF8(allocator: Allocator, buf: []u16) NextError![:0]u8 { return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) { error.ExpectedSecondSurrogateHalf, error.DanglingSurrogateHalf, @@ -463,7 +463,7 @@ pub const ArgIterator = struct { pub const InitError = ArgIteratorWasi.InitError; /// You must deinitialize iterator's internal buffers by calling `deinit` when done. - pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator { + pub fn initWithAllocator(allocator: mem.Allocator) InitError!ArgIterator { if (builtin.os.tag == .wasi and !builtin.link_libc) { return ArgIterator{ .inner = try InnerType.init(allocator) }; } @@ -474,7 +474,7 @@ pub const ArgIterator = struct { pub const NextError = ArgIteratorWindows.NextError; /// You must free the returned memory when done. 
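The `hasEnvVar` hunk above also shows how `std.heap.stackFallback` is consumed now: callers obtain the `Allocator` value from the fallback's `get()` method rather than taking the address of an embedded field. A hedged sketch of that pattern; the buffer size and the duplicated string are arbitrary:

const std = @import("std");

test "stackFallback get() pattern" {
    var stack_alloc = std.heap.stackFallback(256, std.testing.allocator);
    const a = stack_alloc.get(); // previously: &stack_alloc.allocator
    const copy = try a.dupe(u8, "PATH");
    defer a.free(copy);
    try std.testing.expectEqualStrings("PATH", copy);
}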
- pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) { + pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![:0]u8) { if (builtin.os.tag == .windows) { return self.inner.next(allocator); } else { @@ -513,7 +513,7 @@ pub fn args() ArgIterator { } /// You must deinitialize iterator's internal buffers by calling `deinit` when done. -pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator { +pub fn argsWithAllocator(allocator: mem.Allocator) ArgIterator.InitError!ArgIterator { return ArgIterator.initWithAllocator(allocator); } @@ -539,7 +539,7 @@ test "args iterator" { } /// Caller must call argsFree on result. -pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 { +pub fn argsAlloc(allocator: mem.Allocator) ![][:0]u8 { // TODO refactor to only make 1 allocation. var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args(); defer it.deinit(); @@ -579,7 +579,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 { return result_slice_list; } -pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void { +pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void { var total_bytes: usize = 0; for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len + 1; @@ -741,7 +741,7 @@ pub fn getBaseAddress() usize { /// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require /// introducing a new, lower-level function which takes a callback function, and then this /// function which takes an allocator can exist on top of it. -pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 { +pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u8 { switch (builtin.link_mode) { .Static => return &[_][:0]u8{}, .Dynamic => {}, @@ -833,7 +833,7 @@ pub const ExecvError = std.os.ExecveError || error{OutOfMemory}; /// This function also uses the PATH environment variable to get the full path to the executable. /// Due to the heap-allocation, it is illegal to call this function in a fork() child. /// For that use case, use the `std.os` functions directly. -pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError { +pub fn execv(allocator: mem.Allocator, argv: []const []const u8) ExecvError { return execve(allocator, argv, null); } @@ -846,7 +846,7 @@ pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError { /// Due to the heap-allocation, it is illegal to call this function in a fork() child. /// For that use case, use the `std.os` functions directly. 
pub fn execve( - allocator: *mem.Allocator, + allocator: mem.Allocator, argv: []const []const u8, env_map: ?*const std.BufMap, ) ExecvError { @@ -854,7 +854,7 @@ pub fn execve( var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.allocator(); const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig index 37b783771f..d798d2ab6f 100644 --- a/lib/std/special/build_runner.zig +++ b/lib/std/special/build_runner.zig @@ -16,7 +16,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.allocator(); var args = try process.argsAlloc(allocator); defer process.argsFree(allocator, args); diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index e72204377f..9a52ebdbb1 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined; var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer); fn processArgs() void { - const args = std.process.argsAlloc(&args_allocator.allocator) catch { + const args = std.process.argsAlloc(args_allocator.allocator()) catch { @panic("Too many bytes passed over the CLI to the test runner"); }; if (args.len != 2) { diff --git a/lib/std/target.zig b/lib/std/target.zig index 8a7fb923de..3f44b19bc2 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1323,15 +1323,15 @@ pub const Target = struct { pub const stack_align = 16; - pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 { + pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 { return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator); } - pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 { + pub fn linuxTripleSimple(allocator: mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 { return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) }); } - pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 { + pub fn linuxTriple(self: Target, allocator: mem.Allocator) ![]u8 { return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi); } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 53fc05f6db..e5c2afab40 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -7,11 +7,11 @@ const print = std.debug.print; pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator; /// This should only be used in temporary test programs. 
-pub const allocator = &allocator_instance.allocator; +pub const allocator = allocator_instance.allocator(); pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){}; -pub const failing_allocator = &failing_allocator_instance.allocator; -pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0); +pub const failing_allocator = failing_allocator_instance.allocator(); +pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), 0); pub var base_allocator_instance = std.heap.FixedBufferAllocator.init(""); diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index d8b243d0fa..677ca6f51b 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -12,10 +12,9 @@ const mem = std.mem; /// Then use `failing_allocator` anywhere you would have used a /// different allocator. pub const FailingAllocator = struct { - allocator: mem.Allocator, index: usize, fail_index: usize, - internal_allocator: *mem.Allocator, + internal_allocator: mem.Allocator, allocated_bytes: usize, freed_bytes: usize, allocations: usize, @@ -29,34 +28,33 @@ pub const FailingAllocator = struct { /// var a = try failing_alloc.create(i32); /// var b = try failing_alloc.create(i32); /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32)); - pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator { + pub fn init(internal_allocator: mem.Allocator, fail_index: usize) FailingAllocator { return FailingAllocator{ - .internal_allocator = allocator, + .internal_allocator = internal_allocator, .fail_index = fail_index, .index = 0, .allocated_bytes = 0, .freed_bytes = 0, .allocations = 0, .deallocations = 0, - .allocator = mem.Allocator{ - .allocFn = alloc, - .resizeFn = resize, - }, }; } + pub fn allocator(self: *FailingAllocator) mem.Allocator { + return mem.Allocator.init(self, alloc, resize, free); + } + fn alloc( - allocator: *std.mem.Allocator, + self: *FailingAllocator, len: usize, ptr_align: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { - const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); if (self.index == self.fail_index) { return error.OutOfMemory; } - const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address); + const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address); self.allocated_bytes += result.len; self.allocations += 1; self.index += 1; @@ -64,26 +62,30 @@ pub const FailingAllocator = struct { } fn resize( - allocator: *std.mem.Allocator, + self: *FailingAllocator, old_mem: []u8, old_align: u29, new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { - const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); - const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| { - std.debug.assert(new_len > old_mem.len); - return e; - }; - if (new_len == 0) { - self.deallocations += 1; - self.freed_bytes += old_mem.len; - } else if (r < old_mem.len) { + ) ?usize { + const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) orelse return null; + if (r < old_mem.len) { self.freed_bytes += old_mem.len - r; } else { self.allocated_bytes += r - old_mem.len; } return r; } + + fn free( + self: *FailingAllocator, + old_mem: []u8, + old_align: u29, + ra: usize, + ) void { + 
self.internal_allocator.rawFree(old_mem, old_align, ra); + self.deallocations += 1; + self.freed_bytes += old_mem.len; + } }; diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig index 947a1030bb..0bd7f37832 100644 --- a/lib/std/unicode.zig +++ b/lib/std/unicode.zig @@ -550,7 +550,7 @@ fn testDecode(bytes: []const u8) !u21 { } /// Caller must free returned memory. -pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 { +pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 { // optimistically guess that it will all be ascii. var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len); errdefer result.deinit(); @@ -567,7 +567,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 } /// Caller must free returned memory. -pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 { +pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]u8 { // optimistically guess that it will all be ascii. var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len); errdefer result.deinit(); @@ -661,7 +661,7 @@ test "utf16leToUtf8" { } } -pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 { +pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u16 { // optimistically guess that it will not require surrogate pairs var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1); errdefer result.deinit(); diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig index f2ae8d34f6..f96c1bc1b9 100644 --- a/lib/std/wasm.zig +++ b/lib/std/wasm.zig @@ -361,7 +361,7 @@ pub const Type = struct { std.mem.eql(Valtype, self.returns, other.returns); } - pub fn deinit(self: *Type, gpa: *std.mem.Allocator) void { + pub fn deinit(self: *Type, gpa: std.mem.Allocator) void { gpa.free(self.params); gpa.free(self.returns); self.* = undefined; diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 56981a74ac..1420db8ec2 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -100,7 +100,7 @@ pub const BinNameOptions = struct { }; /// Returns the standard file system basename of a binary generated by the Zig compiler. -pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 { +pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 { const root_name = options.root_name; const target = options.target; const ofmt = options.object_format orelse target.getObjectFormat(); diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 02672fbfd1..7729805c88 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -34,7 +34,7 @@ pub const Location = struct { line_end: usize, }; -pub fn deinit(tree: *Tree, gpa: *mem.Allocator) void { +pub fn deinit(tree: *Tree, gpa: mem.Allocator) void { tree.tokens.deinit(gpa); tree.nodes.deinit(gpa); gpa.free(tree.extra_data); @@ -52,7 +52,7 @@ pub const RenderError = error{ /// for allocating extra stack memory if needed, because this function utilizes recursion. /// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006. /// Caller owns the returned slice of bytes, allocated with `gpa`. 
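With the `std.testing` and `failing_allocator.zig` hunks above, allocator singletons are plain instances and the `Allocator` handle is requested from them on demand. A short sketch of driving `FailingAllocator` after this change (failing the very first allocation is just an example):

const std = @import("std");

test "FailingAllocator handle after the change" {
    var failing = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    const a = failing.allocator(); // previously: &failing.allocator
    try std.testing.expectError(error.OutOfMemory, a.alloc(u8, 16));
}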
-pub fn render(tree: Tree, gpa: *mem.Allocator) RenderError![]u8 { +pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 { var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig index 3c6057a8d9..03bb6bc5ff 100644 --- a/lib/std/zig/CrossTarget.zig +++ b/lib/std/zig/CrossTarget.zig @@ -520,7 +520,7 @@ pub fn isNative(self: CrossTarget) bool { return self.isNativeCpu() and self.isNativeOs() and self.isNativeAbi(); } -pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 { +pub fn zigTriple(self: CrossTarget, allocator: mem.Allocator) error{OutOfMemory}![]u8 { if (self.isNative()) { return allocator.dupe(u8, "native"); } @@ -559,13 +559,13 @@ pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory return result.toOwnedSlice(); } -pub fn allocDescription(self: CrossTarget, allocator: *mem.Allocator) ![]u8 { +pub fn allocDescription(self: CrossTarget, allocator: mem.Allocator) ![]u8 { // TODO is there anything else worthy of the description that is not // already captured in the triple? return self.zigTriple(allocator); } -pub fn linuxTriple(self: CrossTarget, allocator: *mem.Allocator) ![]u8 { +pub fn linuxTriple(self: CrossTarget, allocator: mem.Allocator) ![]u8 { return Target.linuxTripleSimple(allocator, self.getCpuArch(), self.getOsTag(), self.getAbi()); } @@ -576,7 +576,7 @@ pub fn wantSharedLibSymLinks(self: CrossTarget) bool { pub const VcpkgLinkage = std.builtin.LinkMode; /// Returned slice must be freed by the caller. -pub fn vcpkgTriplet(self: CrossTarget, allocator: *mem.Allocator, linkage: VcpkgLinkage) ![]u8 { +pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 { const arch = switch (self.getCpuArch()) { .i386 => "x86", .x86_64 => "x64", diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index 28a0c1a196..89abb35006 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -11,7 +11,7 @@ pub const Error = error{ParseError} || Allocator.Error; /// Result should be freed with tree.deinit() when there are /// no more references to any of the tokens or nodes. -pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast { +pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast { var tokens = Ast.TokenList{}; defer tokens.deinit(gpa); @@ -81,7 +81,7 @@ const null_node: Node.Index = 0; /// Represents in-progress parsing, will be converted to an Ast after completion. const Parser = struct { - gpa: *Allocator, + gpa: Allocator, source: []const u8, token_tags: []const Token.Tag, token_starts: []const Ast.ByteOffset, diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index fb1a8120d4..ef716ffb32 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -1220,7 +1220,7 @@ test "zig fmt: doc comments on param decl" { try testCanonical( \\pub const Allocator = struct { \\ shrinkFn: fn ( - \\ self: *Allocator, + \\ self: Allocator, \\ /// Guaranteed to be the same as what was returned from most recent call to \\ /// `allocFn`, `reallocFn`, or `shrinkFn`. 
         \\        old_mem: []u8,
@@ -4250,7 +4250,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
 
 test "zig fmt: Don't add extra newline after if" {
     try testCanonical(
-        \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+        \\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
         \\    if (cwd().symLink(existing_path, new_path, .{})) {
         \\        return;
         \\    }
@@ -5319,7 +5319,7 @@ const maxInt = std.math.maxInt;
 
 var fixed_buffer_mem: [100 * 1024]u8 = undefined;
 
-fn testParse(source: [:0]const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
     const stderr = io.getStdErr().writer();
 
     var tree = try std.zig.parse(allocator, source);
@@ -5351,9 +5351,10 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
     const needed_alloc_count = x: {
         // Try it once with unlimited memory, make sure it works
         var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-        var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
+        var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), maxInt(usize));
+        const allocator = failing_allocator.allocator();
         var anything_changed: bool = undefined;
-        const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+        const result_source = try testParse(source, allocator, &anything_changed);
         try std.testing.expectEqualStrings(expected_source, result_source);
         const changes_expected = source.ptr != expected_source.ptr;
         if (anything_changed != changes_expected) {
@@ -5361,16 +5362,16 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
             return error.TestFailed;
         }
         try std.testing.expect(anything_changed == changes_expected);
-        failing_allocator.allocator.free(result_source);
+        allocator.free(result_source);
         break :x failing_allocator.index;
     };
 
     var fail_index: usize = 0;
     while (fail_index < needed_alloc_count) : (fail_index += 1) {
         var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-        var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+        var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), fail_index);
         var anything_changed: bool = undefined;
-        if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+        if (testParse(source, failing_allocator.allocator(), &anything_changed)) |_| {
             return error.NondeterministicMemoryUsage;
         } else |err| switch (err) {
             error.OutOfMemory => {
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index b6f513cc0a..d3fc90eaea 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -33,7 +33,7 @@ pub fn main() !void {
 
 fn testOnce() usize {
     var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-    var allocator = &fixed_buf_alloc.allocator;
+    var allocator = fixed_buf_alloc.allocator();
     _ = std.zig.parse(allocator, source) catch @panic("parse failure");
     return fixed_buf_alloc.end_index;
 }
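The testTransform hunks above show the updated allocation-failure pattern: the FailingAllocator now wraps the value returned by fixed_allocator.allocator(), and call sites obtain their own std.mem.Allocator once via failing_allocator.allocator(). A minimal self-contained sketch of that count-then-fail technique, assuming only the public std.testing.FailingAllocator and std.heap.FixedBufferAllocator APIs (the buffer size and the 128-byte allocation are illustrative, not taken from the patch):

const std = @import("std");

test "count allocations, then fail each index in turn" {
    var buf: [4096]u8 = undefined;

    // First pass: allow every allocation and record how many were made.
    var fixed = std.heap.FixedBufferAllocator.init(buf[0..]);
    var failing = std.testing.FailingAllocator.init(fixed.allocator(), std.math.maxInt(usize));
    const allocator = failing.allocator();

    const data = try allocator.alloc(u8, 128);
    allocator.free(data);
    const needed_alloc_count = failing.index;

    // Second pass: fail at each allocation index and expect error.OutOfMemory.
    var fail_index: usize = 0;
    while (fail_index < needed_alloc_count) : (fail_index += 1) {
        var fixed_retry = std.heap.FixedBufferAllocator.init(buf[0..]);
        var failing_retry = std.testing.FailingAllocator.init(fixed_retry.allocator(), fail_index);
        try std.testing.expectError(error.OutOfMemory, failing_retry.allocator().alloc(u8, 128));
    }
}

The same two-pass structure is what testTransform uses to check that parsing handles out-of-memory deterministically at every allocation point.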
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 8a909bf562..a703e1f3ea 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -37,7 +37,7 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void {
 }
 
 /// Render all members in the given slice, keeping empty lines where appropriate
-fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
+fn renderMembers(gpa: Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
     if (members.len == 0) return;
     try renderMember(gpa, ais, tree, members[0], .newline);
     for (members[1..]) |member| {
@@ -46,7 +46,7 @@ fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Nod
     }
 }
 
-fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
+fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const main_tokens = tree.nodes.items(.main_token);
     const datas = tree.nodes.items(.data);
@@ -168,7 +168,7 @@ fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spa
 }
 
 /// Render all expressions in the slice, keeping empty lines where appropriate
-fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
+fn renderExpressions(gpa: Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
     if (expressions.len == 0) return;
     try renderExpression(gpa, ais, tree, expressions[0], space);
     for (expressions[1..]) |expression| {
@@ -177,7 +177,7 @@ fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const
     }
 }
 
-fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const main_tokens = tree.nodes.items(.main_token);
     const node_tags = tree.nodes.items(.tag);
@@ -710,7 +710,7 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
 }
 
 fn renderArrayType(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     array_type: Ast.full.ArrayType,
@@ -732,7 +732,7 @@ fn renderArrayType(
 }
 
 fn renderPtrType(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     ptr_type: Ast.full.PtrType,
@@ -825,7 +825,7 @@ fn renderPtrType(
 }
 
 fn renderSlice(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     slice_node: Ast.Node.Index,
@@ -861,7 +861,7 @@ fn renderSlice(
 }
 
 fn renderAsmOutput(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_output: Ast.Node.Index,
@@ -891,7 +891,7 @@ fn renderAsmOutput(
 }
 
 fn renderAsmInput(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_input: Ast.Node.Index,
@@ -912,7 +912,7 @@ fn renderAsmInput(
     return renderToken(ais, tree, datas[asm_input].rhs, space); // rparen
 }
 
-fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
+fn renderVarDecl(gpa: Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
     if (var_decl.visib_token) |visib_token| {
         try renderToken(ais, tree, visib_token, Space.space); // pub
     }
@@ -1019,7 +1019,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
     return renderToken(ais, tree, var_decl.ast.mut_token + 2, .newline); // ;
 }
 
-fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
+fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
     return renderWhile(gpa, ais, tree, .{
         .ast = .{
             .while_token = if_node.ast.if_token,
@@ -1038,7 +1038,7 @@ fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space:
 
 /// Note that this function is additionally used to render if and for expressions, with
 /// respective values set to null.
-fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
+fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
     const node_tags = tree.nodes.items(.tag);
     const token_tags = tree.tokens.items(.tag);
 
@@ -1141,7 +1141,7 @@ fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While
 }
 
 fn renderContainerField(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     field: Ast.full.ContainerField,
@@ -1215,7 +1215,7 @@ fn renderContainerField(
 }
 
 fn renderBuiltinCall(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     builtin_token: Ast.TokenIndex,
@@ -1272,7 +1272,7 @@ fn renderBuiltinCall(
     }
 }
 
-fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
+fn renderFnProto(gpa: Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const token_starts = tree.tokens.items(.start);
 
@@ -1488,7 +1488,7 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
 }
 
 fn renderSwitchCase(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     switch_case: Ast.full.SwitchCase,
@@ -1541,7 +1541,7 @@ fn renderSwitchCase(
 }
 
 fn renderBlock(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     block_node: Ast.Node.Index,
@@ -1581,7 +1581,7 @@ fn renderBlock(
 }
 
 fn renderStructInit(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     struct_node: Ast.Node.Index,
@@ -1640,7 +1640,7 @@ fn renderStructInit(
 }
 
 fn renderArrayInit(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     array_init: Ast.full.ArrayInit,
@@ -1859,7 +1859,7 @@ fn renderArrayInit(
 }
 
 fn renderContainerDecl(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     container_decl_node: Ast.Node.Index,
@@ -1956,7 +1956,7 @@ fn renderContainerDecl(
 }
 
 fn renderAsm(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_node: Ast.full.Asm,
@@ -2105,7 +2105,7 @@ fn renderAsm(
 }
 
 fn renderCall(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     call: Ast.full.Call,
@@ -2180,7 +2180,7 @@ fn renderCall(
 
 /// Renders the given expression indented, popping the indent before rendering
 /// any following line comments
-fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionIndented(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_starts = tree.tokens.items(.start);
     const token_tags = tree.tokens.items(.tag);
 
@@ -2238,7 +2238,7 @@ fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Nod
 
 /// Render an expression, and the comma that follows it, if it is present in the source.
 /// If a comma is present, and `space` is `Space.comma`, render only a single comma.
-fn renderExpressionComma(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionComma(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const maybe_comma = tree.lastToken(node) + 1;
     if (token_tags[maybe_comma] == .comma and space != .comma) {
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 2a38195b1f..1eaab26e3a 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -131,7 +131,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
 
 /// Higher level API. Does not return extra info about parse errors.
 /// Caller owns returned memory.
-pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
+pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();
 
@@ -147,7 +147,7 @@ test "parse" {
 
     var fixed_buf_mem: [32]u8 = undefined;
     var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
-    var alloc = &fixed_buf_alloc.allocator;
+    var alloc = fixed_buf_alloc.allocator();
 
     try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
     try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index 353ad25096..5ba0d8198c 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -21,7 +21,7 @@ pub const NativePaths = struct {
     rpaths: ArrayList([:0]u8),
     warnings: ArrayList([:0]u8),
 
-    pub fn detect(allocator: *Allocator, native_info: NativeTargetInfo) !NativePaths {
+    pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths {
         const native_target = native_info.target;
 
         var self: NativePaths = .{
@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
     /// Any resources this function allocates are released before returning, and so there is no
     /// deinitialization method.
     /// TODO Remove the Allocator requirement from this function.
-    pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
+    pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
        var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
        if (cross_target.os_tag == null) {
            switch (builtin.target.os.tag) {
@@ -441,7 +441,7 @@ pub const NativeTargetInfo = struct {
     /// we fall back to the defaults.
     /// TODO Remove the Allocator requirement from this function.
     fn detectAbiAndDynamicLinker(
-        allocator: *Allocator,
+        allocator: Allocator,
         cpu: Target.Cpu,
         os: Target.Os,
         cross_target: CrossTarget,
diff --git a/lib/std/zig/system/darwin.zig b/lib/std/zig/system/darwin.zig
index 5ce769a792..c20607440d 100644
--- a/lib/std/zig/system/darwin.zig
+++ b/lib/std/zig/system/darwin.zig
@@ -11,7 +11,7 @@ pub const macos = @import("darwin/macos.zig");
 /// Therefore, we resort to the same tool used by Homebrew, namely, invoking `xcode-select --print-path`
 /// and checking if the status is nonzero or the returned string in nonempty.
 /// https://github.com/Homebrew/brew/blob/e119bdc571dcb000305411bc1e26678b132afb98/Library/Homebrew/brew.sh#L630
-pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
+pub fn isDarwinSDKInstalled(allocator: Allocator) bool {
     const argv = &[_][]const u8{ "/usr/bin/xcode-select", "--print-path" };
     const result = std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }) catch return false;
     defer {
@@ -29,7 +29,7 @@ pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
 /// Calls `xcrun --sdk <target_sdk> --show-sdk-path` which fetches the path to the SDK sysroot (if any).
 /// Subsequently calls `xcrun --sdk <target_sdk> --show-sdk-version` which fetches version of the SDK.
 /// The caller needs to deinit the resulting struct.
-pub fn getDarwinSDK(allocator: *Allocator, target: Target) ?DarwinSDK {
+pub fn getDarwinSDK(allocator: Allocator, target: Target) ?DarwinSDK {
     const is_simulator_abi = target.abi == .simulator;
     const sdk = switch (target.os.tag) {
         .macos => "macosx",
@@ -82,7 +82,7 @@ pub const DarwinSDK = struct {
     path: []const u8,
     version: Version,
 
-    pub fn deinit(self: DarwinSDK, allocator: *Allocator) void {
+    pub fn deinit(self: DarwinSDK, allocator: Allocator) void {
         allocator.free(self.path);
     }
 };
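Taken together, the signature changes in this patch follow a single pattern: standard library functions now take std.mem.Allocator by value instead of *Allocator, and allocator implementations such as FixedBufferAllocator are asked for the interface value through their allocator() method rather than through a public allocator field. A minimal sketch of the new call-site idiom, assuming only APIs that appear above (the buffer size, the helper name dupeNative, and the duplicated string are illustrative):

const std = @import("std");

/// Takes the allocator interface by value, matching the updated std signatures.
fn dupeNative(allocator: std.mem.Allocator) ![]u8 {
    return allocator.dupe(u8, "native");
}

test "allocator passed by value, obtained via allocator()" {
    var buf: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(buf[0..]);

    // Previously: `var allocator = &fba.allocator;` took the address of a field.
    // Now the implementation vends a std.mem.Allocator value.
    const allocator = fba.allocator();

    const copy = try dupeNative(allocator);
    defer allocator.free(copy);
    try std.testing.expectEqualStrings("native", copy);
}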
