path: root/lib/std/mem.zig
author     Andrew Kelley <andrew@ziglang.org>    2020-06-27 18:21:00 -0400
committer  GitHub <noreply@github.com>           2020-06-27 18:21:00 -0400
commit     0cfe8e5d6ff06eed0cde6aed0c009a58ceffc395 (patch)
tree       d0dd3f43e534528d5c99ae28506c846a7d9063d0 /lib/std/mem.zig
parent     626b5eccab7264e579ce58f56be5fbc3aa42efc4 (diff)
parent     a728436992415d1bce44b0c63938f6443a4e9a11 (diff)
Merge pull request #5064 from marler8997/newAllocator
new allocator interface
Diffstat (limited to 'lib/std/mem.zig')
-rw-r--r--    lib/std/mem.zig    315
1 file changed, 258 insertions(+), 57 deletions(-)
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index b942fd3bf4..bf1e000056 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -16,6 +16,52 @@ pub const page_size = switch (builtin.arch) {
pub const Allocator = struct {
pub const Error = error{OutOfMemory};
+ /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
+ ///
+ /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes;
+ /// otherwise, the length must be aligned to `len_align`.
+ ///
+ /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
+
+ /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
+ /// length returned by `allocFn` or `resizeFn`.
+ ///
+ /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
+ /// longer be passed to `resizeFn`.
+ ///
+ /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+ /// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
+ /// unmodified and error.OutOfMemory MUST be returned.
+ ///
+ /// If `len_align` is `0`, then the length returned MUST be exactly `new_len` bytes;
+ /// otherwise, the length must be aligned to `len_align`.
+ ///
+ /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+ resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,
+
+ pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
+ return self.allocFn(self, new_len, alignment, len_align);
+ }
+
+ pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
+ return self.resizeFn(self, buf, new_len, len_align);
+ }
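// Illustrative sketch (not part of the patch above): how a caller might rely on
// the `len_align` contract documented on `allocFn`, assuming `allocator` is a
// valid `*Allocator` and `assert` is `std.debug.assert`:
//
//     // len_align == 0: the returned slice is exactly the requested length.
//     const exact = try allocator.callAllocFn(100, 8, 0);
//     assert(exact.len == 100);
//
//     // len_align == 8: the length may exceed the request but must remain a
//     // multiple of 8 (the request itself must also be a multiple of 8).
//     const loose = try allocator.callAllocFn(104, 8, 8);
//     assert(loose.len >= 104 and loose.len % 8 == 0);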
+
+ /// Set `resizeFn` to this function if in-place resize is not supported.
+ pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
+ if (new_len > buf.len)
+ return error.OutOfMemory;
+ return new_len;
+ }
+
+ /// Call `resizeFn`, but the caller guarantees that `new_len` <= `buf.len`, which
+ /// makes error.OutOfMemory impossible.
+ pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
+ assert(new_len <= buf.len);
+ return self.callResizeFn(buf, new_len, len_align) catch unreachable;
+ }
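// Illustrative sketch (not part of the patch): an allocator that cannot resize
// in place can point `resizeFn` at `noResize`, as `failAllocator` further down
// does. Freeing still works, because a resize to 0 never exceeds `buf.len`, so
// `noResize` never returns error.OutOfMemory for it. `myAllocFn` here is a
// hypothetical allocFn implementation:
//
//     var my_allocator = Allocator{
//         .allocFn = myAllocFn,
//         .resizeFn = Allocator.noResize,
//     };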
+
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
@@ -24,7 +70,7 @@ pub const Allocator = struct {
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size. Clients will call
- /// `shrinkFn` when they require the allocator to track a new alignment/size,
+ /// `callResizeFn` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
@@ -37,16 +83,15 @@ pub const Allocator = struct {
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
- reallocFn: fn (
+ fn reallocBytes(
self: *Allocator,
/// Guaranteed to be the same as what was returned from the most recent call to
- /// `reallocFn` or `shrinkFn`.
+ /// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
+ /// Guaranteed to be the same as what was passed to `allocFn`.
/// Guaranteed to be >= 1.
/// Guaranteed to be a power of 2.
old_alignment: u29,
@@ -57,23 +102,52 @@ pub const Allocator = struct {
/// Guaranteed to be a power of 2.
/// Returned slice's pointer must have this alignment.
new_alignment: u29,
- ) Error![]u8,
+ /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly;
+ /// non-zero means the length of the returned slice must be aligned by `len_align`.
+ /// `new_byte_count` must itself be aligned by `len_align`.
+ len_align: u29,
+ ) Error![]u8 {
+ if (old_mem.len == 0) {
+ const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
+ @memset(new_mem.ptr, undefined, new_byte_count);
+ return new_mem;
+ }
- /// This function deallocates memory. It must succeed.
- shrinkFn: fn (
- self: *Allocator,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
- old_mem: []u8,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `reallocFn` or `shrinkFn`.
- old_alignment: u29,
- /// Guaranteed to be less than or equal to `old_mem.len`.
- new_byte_count: usize,
- /// If `new_byte_count == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be less than or equal to `old_alignment`.
- new_alignment: u29,
- ) []u8,
+ if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
+ if (new_byte_count <= old_mem.len) {
+ const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
+ if (shrunk_len < old_mem.len) {
+ @memset(old_mem.ptr + shrunk_len, undefined, old_mem.len - shrunk_len);
+ }
+ return old_mem.ptr[0..shrunk_len];
+ }
+ if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
+ assert(resized_len >= new_byte_count);
+ @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
+ return old_mem.ptr[0..resized_len];
+ } else |_| { }
+ }
+ if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
+ return error.OutOfMemory;
+ }
+ return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
+ }
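// Editorial trace (not part of the patch) of the fallback order above, for a
// hypothetical grow of a 16-byte allocation to 32 bytes with unchanged alignment:
//
//     // 1) the old pointer already satisfies new_alignment and 32 > 16, so
//     //    callResizeFn(old_mem, 32, len_align) attempts an in-place grow;
//     // 2) if that returns error.OutOfMemory, the request is not a pure
//     //    shrink, so moveBytes runs: allocate 32 bytes, copy the first 16,
//     //    then free the old buffer.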
+
+ /// Move the given memory to a new location in the given allocator to accommodate a new
+ /// size and alignment.
+ fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
+ assert(old_mem.len > 0);
+ assert(new_len > 0);
+ const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
+ @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
+ // DISABLED TO AVOID BUGS IN TRANSLATE C
+ // Use './zig build test-translate-c' to reproduce: some of the symbols in the
+ // generated C code will be a sequence of 0xaa (the undefined value), meaning
+ // it is printing data that has been freed.
+ //@memset(old_mem.ptr, undefined, old_mem.len);
+ _ = self.shrinkBytes(old_mem, 0, 0);
+ return new_mem;
+ }
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
@@ -89,8 +163,7 @@ pub const Allocator = struct {
const T = @TypeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
- assert(shrink_result.len == 0);
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -144,6 +217,7 @@ pub const Allocator = struct {
return self.allocWithOptions(Elem, n, null, sentinel);
}
+ /// Deprecated: use `allocAdvanced`
pub fn alignedAlloc(
self: *Allocator,
comptime T: type,
@@ -151,8 +225,20 @@ pub const Allocator = struct {
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
+ return self.allocAdvanced(T, alignment, n, .exact);
+ }
+
+ const Exact = enum { exact, at_least };
+ pub fn allocAdvanced(
+ self: *Allocator,
+ comptime T: type,
+ /// null means naturally aligned
+ comptime alignment: ?u29,
+ n: usize,
+ exact: Exact,
+ ) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
- if (a == @alignOf(T)) return alignedAlloc(self, T, null, n);
+ if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
break :blk a;
} else @alignOf(T);
@@ -161,15 +247,19 @@ pub const Allocator = struct {
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
- const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a);
- assert(byte_slice.len == byte_count);
+ // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
+ // access certain type information about T without creating a circular dependency in async
+ // functions that heap-allocate their own frame with @Frame(func).
+ const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
+ const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
+ switch (exact) {
+ .exact => assert(byte_slice.len == byte_count),
+ .at_least => assert(byte_slice.len >= byte_count),
+ }
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
- // TODO This is a workaround for zig not being able to successfully do
- // @bytesToSlice(T, @alignCast(a, byte_slice)) without resolving alignment of T,
- // which causes a circular dependency in async functions which try to heap-allocate
- // their own frame with @Frame(func).
- return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..n];
+ // This if block is a workaround (see comment above)
+ return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
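// Illustrative sketch (not part of the patch): use of the new `exact`
// parameter, assuming `allocator` is a valid `*Allocator`:
//
//     const exact = try allocator.allocAdvanced(u8, null, 100, .exact);
//     assert(exact.len == 100);
//
//     // .at_least lets the allocator round the length up to whatever it
//     // reserves anyway; callers like std.ArrayList keep it as capacity.
//     const loose = try allocator.allocAdvanced(u8, null, 100, .at_least);
//     assert(loose.len >= 100);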
@@ -190,22 +280,41 @@ pub const Allocator = struct {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.alignedRealloc(old_mem, old_alignment, new_n);
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
+ }
+
+ pub fn reallocAtLeast(self: *Allocator, old_mem: var, new_n: usize) t: {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ break :t Error![]align(Slice.alignment) Slice.child;
+ } {
+ const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
+ }
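// Illustrative sketch (not part of the patch): `reallocAtLeast` in use,
// assuming `buf` is a mutable slice previously allocated from `allocator`:
//
//     buf = try allocator.reallocAtLeast(buf, 200);
//     assert(buf.len >= 200); // may exceed 200, per the allocator's len_align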
+
+ /// Deprecated: use `reallocAdvanced`
+ pub fn alignedRealloc(
+ self: *Allocator,
+ old_mem: var,
+ comptime new_alignment: u29,
+ new_n: usize,
+ ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
}
/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
- pub fn alignedRealloc(
+ pub fn reallocAdvanced(
self: *Allocator,
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
+ exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
- return self.alignedAlloc(T, new_alignment, new_n);
+ return self.allocAdvanced(T, new_alignment, new_n, exact);
}
if (new_n == 0) {
self.free(old_mem);
@@ -215,12 +324,9 @@ pub const Allocator = struct {
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
- const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
- assert(byte_slice.len == byte_count);
- if (new_n > old_mem.len) {
- @memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
- }
- return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
+ const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment,
+ if (exact == .exact) @as(u29, 0) else @sizeOf(T));
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
@@ -248,12 +354,9 @@ pub const Allocator = struct {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
- if (new_n == 0) {
- self.free(old_mem);
- return old_mem[0..0];
- }
-
- assert(new_n <= old_mem.len);
+ if (new_n == old_mem.len)
+ return old_mem;
+ assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
// Here we skip the overflow checking on the multiplication because
@@ -262,9 +365,8 @@ pub const Allocator = struct {
const old_byte_slice = mem.sliceAsBytes(old_mem);
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
- const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
- assert(byte_slice.len == byte_count);
- return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
+ _ = self.shrinkBytes(old_byte_slice, byte_count, 0);
+ return old_mem[0..new_n];
}
/// Free an array allocated with `alloc`. To free a single item,
@@ -276,8 +378,7 @@ pub const Allocator = struct {
if (bytes_len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
@memset(non_const_ptr, undefined, bytes_len);
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1);
- assert(shrink_result.len == 0);
+ _ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -296,16 +397,94 @@ pub const Allocator = struct {
}
};
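// Editorial sketch (not part of the patch): a minimal bump allocator built on
// the new two-function interface. All names here are hypothetical; the real
// implementations live in std/heap.zig.
//
//     var buffer: [1024]u8 = undefined;
//     var end_index: usize = 0;
//
//     fn bumpAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
//         const addr = @ptrToInt(&buffer) + end_index;
//         const adjusted_index = mem.alignForward(addr, ptr_align) - @ptrToInt(&buffer);
//         const new_end_index = adjusted_index + len;
//         if (new_end_index > buffer.len) return error.OutOfMemory;
//         end_index = new_end_index;
//         // Returning exactly `len` bytes satisfies both len_align cases,
//         // since `len` is guaranteed to be aligned by `len_align`.
//         return buffer[adjusted_index..new_end_index];
//     }
//
//     var bump_allocator = Allocator{
//         .allocFn = bumpAlloc,
//         .resizeFn = Allocator.noResize,
//     };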
+/// Detects and asserts violations of the std.mem.Allocator interface, whether by
+/// the caller or by the underlying allocator.
+pub fn ValidationAllocator(comptime T: type) type {
+ return struct {
+ const Self = @This();
+ allocator: Allocator,
+ underlying_allocator: T,
+ pub fn init(allocator: T) @This() {
+ return .{
+ .allocator = .{
+ .allocFn = alloc,
+ .resizeFn = resize,
+ },
+ .underlying_allocator = allocator,
+ };
+ }
+ fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
+ if (T == *Allocator) return self.underlying_allocator;
+ if (*T == *Allocator) return &self.underlying_allocator;
+ return &self.underlying_allocator.allocator;
+ }
+ pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+ assert(n > 0);
+ assert(mem.isValidAlign(ptr_align));
+ if (len_align != 0) {
+ assert(mem.isAlignedAnyAlign(n, len_align));
+ assert(n >= len_align);
+ }
+
+ const self = @fieldParentPtr(@This(), "allocator", allocator);
+ const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
+ assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
+ if (len_align == 0) {
+ assert(result.len == n);
+ } else {
+ assert(result.len >= n);
+ assert(mem.isAlignedAnyAlign(result.len, len_align));
+ }
+ return result;
+ }
+ pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+ assert(buf.len > 0);
+ if (len_align != 0) {
+ assert(mem.isAlignedAnyAlign(new_len, len_align));
+ assert(new_len >= len_align);
+ }
+ const self = @fieldParentPtr(@This(), "allocator", allocator);
+ const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
+ if (len_align == 0) {
+ assert(result == new_len);
+ } else {
+ assert(result >= new_len);
+ assert(mem.isAlignedAnyAlign(result, len_align));
+ }
+ return result;
+ }
+ pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
+ pub fn reset(self: *Self) void {
+ self.underlying_allocator.reset();
+ }
+ };
+ };
+}
+
+pub fn validationWrap(allocator: var) ValidationAllocator(@TypeOf(allocator)) {
+ return ValidationAllocator(@TypeOf(allocator)).init(allocator);
+}
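// Illustrative sketch (not part of the patch): wrapping an existing allocator,
// assuming `std.heap.page_allocator` is in scope:
//
//     var validated = validationWrap(std.heap.page_allocator);
//     const slice = try validated.allocator.alloc(u8, 16);
//     defer validated.allocator.free(slice);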
+
+/// An allocator helper function. Adjusts an allocation length to satisfy `len_align`.
+/// `full_len` should be the full capacity of the allocation, which may be greater
+/// than the `alloc_len` that was requested. This function should only be used by
+/// allocators that are unaffected by `len_align`.
+pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
+ assert(alloc_len > 0);
+ assert(alloc_len >= len_align);
+ assert(full_len >= alloc_len);
+ if (len_align == 0)
+ return alloc_len;
+ const adjusted = alignBackwardAnyAlign(full_len, len_align);
+ assert(adjusted >= alloc_len);
+ return adjusted;
+}
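// Worked examples (editorial, not part of the patch), for an allocator that
// reserved a full 4096-byte page to satisfy a 100-byte request:
//
//     assert(alignAllocLen(4096, 100, 16) == 4096); // 4096 is already a multiple of 16
//     assert(alignAllocLen(4100, 100, 16) == 4096); // capacity rounded down to a multiple of 16
//     assert(alignAllocLen(4096, 100, 0) == 100);   // len_align == 0 means exact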
+
var failAllocator = Allocator{
- .reallocFn = failAllocatorRealloc,
- .shrinkFn = failAllocatorShrink,
+ .allocFn = failAllocatorAlloc,
+ .resizeFn = Allocator.noResize,
};
-fn failAllocatorRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
return error.OutOfMemory;
}
-fn failAllocatorShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- @panic("failAllocatorShrink should never be called because it cannot allocate");
-}
test "mem.Allocator basics" {
testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1));
@@ -2191,6 +2370,15 @@ test "alignForward" {
}
/// Round an address down to the previous aligned address
+/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
+pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
+ if (@popCount(usize, alignment) == 1)
+ return alignBackward(i, alignment);
+ assert(alignment != 0);
+ return i - @mod(i, alignment);
+}
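// Worked examples (editorial, not part of the patch):
//
//     assert(alignBackwardAnyAlign(17, 8) == 16); // power of 2: bitmask fast path
//     assert(alignBackwardAnyAlign(17, 5) == 15); // any alignment, via @mod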
+
+/// Round an address down to the previous aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize {
return alignBackwardGeneric(usize, addr, alignment);
@@ -2206,6 +2394,19 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
return addr & ~(alignment - 1);
}
+/// Returns whether `alignment` is a valid alignment, meaning it is
+/// a positive power of 2.
+pub fn isValidAlign(alignment: u29) bool {
+ return @popCount(u29, alignment) == 1;
+}
+
+pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
+ if (@popCount(usize, alignment) == 1)
+ return isAligned(i, alignment);
+ assert(alignment != 0);
+ return 0 == @mod(i, alignment);
+}
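// Worked examples (editorial, not part of the patch):
//
//     assert(isAlignedAnyAlign(15, 5));  // 15 is a multiple of 5
//     assert(!isAlignedAnyAlign(16, 5));
//     assert(isAlignedAnyAlign(16, 8));  // power-of-2 fast path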
+
/// Given an address and an alignment, return true if the address is a multiple of the alignment
/// The alignment must be a power of 2 and greater than 0.
pub fn isAligned(addr: usize, alignment: usize) bool {