author    Andrea Orru <andrea@orru.io>   2018-08-06 01:43:19 -0400
committer Andrea Orru <andrea@orru.io>   2018-08-06 01:43:19 -0400
commit    d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d (patch)
tree      e9fa3caec533a0d1e2b434868b2fde1f9240e5c8 /std/heap.zig
parent    06614b3fa09954464c2e2f32756cacedc178a282 (diff)
parent    63a23e848a62d5f167f8d5478de9766cb24aa6eb (diff)
download  zig-d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d.tar.gz
          zig-d2f5e57b68da0b16e5789ca19045ccbcb4ecfa8d.zip

Merge branch 'master' into zen_stdlib
Diffstat (limited to 'std/heap.zig')
-rw-r--r--   std/heap.zig   386
1 file changed, 309 insertions(+), 77 deletions(-)
diff --git a/std/heap.zig b/std/heap.zig
index ca6736af1e..f5e0484b25 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -10,24 +10,21 @@ const c = std.c;
const Allocator = mem.Allocator;
pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator {
+var c_allocator_state = Allocator{
.allocFn = cAlloc,
.reallocFn = cRealloc,
.freeFn = cFree,
};
-fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
+fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
assert(alignment <= @alignOf(c_longdouble));
- return if (c.malloc(n)) |buf|
- @ptrCast(&u8, buf)[0..n]
- else
- error.OutOfMemory;
+ return if (c.malloc(n)) |buf| @ptrCast([*]u8, buf)[0..n] else error.OutOfMemory;
}
-fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
- return @ptrCast(&u8, buf)[0..new_size];
+ return @ptrCast([*]u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -35,28 +32,22 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
}
-fn cFree(self: &Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cFree(self: *Allocator, old_mem: []u8) void {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
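
For reference, a minimal usage sketch of the libc-backed allocator (not part of this commit; it assumes libc is linked, as the c_allocator test near the bottom of this diff also requires, and that this file is reachable as std.heap):

    const std = @import("std");
    const assert = std.debug.assert;

    test "c_allocator sketch" {
        // c_allocator forwards to malloc/realloc/free via the functions above.
        const allocator = std.heap.c_allocator;
        var slice = try allocator.alloc(u8, 64);
        defer allocator.free(slice);
        assert(slice.len == 64);
    }
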
/// This allocator makes a syscall directly for every allocation and free.
+/// Thread-safe and lock-free.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;
- //pub const canary_bytes = []u8 {48, 239, 128, 46, 18, 49, 147, 9, 195, 59, 203, 3, 245, 54, 9, 122};
- //pub const want_safety = switch (builtin.mode) {
- // builtin.Mode.Debug => true,
- // builtin.Mode.ReleaseSafe => true,
- // else => false,
- //};
-
pub fn init() DirectAllocator {
- return DirectAllocator {
- .allocator = Allocator {
+ return DirectAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -65,7 +56,7 @@ pub const DirectAllocator = struct {
};
}
- pub fn deinit(self: &DirectAllocator) void {
+ pub fn deinit(self: *DirectAllocator) void {
switch (builtin.os) {
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
@@ -74,41 +65,58 @@ pub const DirectAllocator = struct {
}
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
- assert(alignment <= os.page_size);
const p = os.posix;
- const addr = p.mmap(null, n, p.PROT_READ|p.PROT_WRITE,
- p.MAP_PRIVATE|p.MAP_ANONYMOUS, -1, 0);
- if (addr == p.MAP_FAILED) {
- return error.OutOfMemory;
- }
- return @intToPtr(&u8, addr)[0..n];
+ const alloc_size = if (alignment <= os.page_size) n else n + alignment;
+ const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
+ if (addr == p.MAP_FAILED) return error.OutOfMemory;
+ if (alloc_size == n) return @intToPtr([*]u8, addr)[0..n];
+
+ const aligned_addr = (addr & ~usize(alignment - 1)) + alignment;
+
+ // We can unmap the unused portions of our mmap, but we must only
+ // pass munmap bytes that exist outside our allocated pages or it
+ // will happily eat us too.
+
+ // Since alignment > page_size, we are by definition on a page boundary.
+ const unused_start = addr;
+ const unused_len = aligned_addr - 1 - unused_start;
+
+ const err = p.munmap(unused_start, unused_len);
+ assert(p.getErrno(err) == 0);
+
+ // It is impossible that there is an unoccupied page at the top of our
+ // mmap.
+
+ return @intToPtr([*]u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle ?? blk: {
- const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
- self.heap_handle = hh;
- break :blk hh;
+ const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
+ const heap_handle = optional_heap_handle orelse blk: {
+ const hh = os.windows.HeapCreate(0, amt, 0) orelse return error.OutOfMemory;
+ const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
+ _ = os.windows.HeapDestroy(hh);
+ break :blk other_hh.?; // can't be null because of the cmpxchg
};
- const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
+ const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
- *@intToPtr(&align(1) usize, record_addr) = root_addr;
- return @intToPtr(&u8, adjusted_addr)[0..n];
+ @intToPtr(*align(1) usize, record_addr).* = root_addr;
+ return @intToPtr([*]u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -120,7 +128,7 @@ pub const DirectAllocator = struct {
const rem = @rem(new_addr_end, os.page_size);
const new_addr_end_rounded = new_addr_end + if (rem == 0) 0 else (os.page_size - rem);
if (old_addr_end > new_addr_end_rounded) {
- _ = os.posix.munmap(@intToPtr(&u8, new_addr_end_rounded), old_addr_end - new_addr_end_rounded);
+ _ = os.posix.munmap(new_addr_end_rounded, old_addr_end - new_addr_end_rounded);
}
return old_mem[0..new_size];
}
@@ -132,13 +140,13 @@ pub const DirectAllocator = struct {
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = *@intToPtr(&align(1) usize, old_record_addr);
- const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
+ const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
+ const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
- *@intToPtr(&align(1) usize, new_record_addr) = root_addr;
+ @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
@@ -146,25 +154,25 @@ pub const DirectAllocator = struct {
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
- *@intToPtr(&align(1) usize, new_record_addr) = new_root_addr;
- return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
+ @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
+ return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {
+ fn free(allocator: *Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
- _ = os.posix.munmap(bytes.ptr, bytes.len);
+ _ = os.posix.munmap(@ptrToInt(bytes.ptr), bytes.len);
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
- const root_addr = *@intToPtr(&align(1) usize, record_addr);
- const ptr = @intToPtr(os.windows.LPVOID, root_addr);
- _ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
+ const root_addr = @intToPtr(*align(1) usize, record_addr).*;
+ const ptr = @intToPtr(*c_void, root_addr);
+ _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
},
else => @compileError("Unsupported OS"),
}
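
For reference, a minimal DirectAllocator usage sketch (not part of this commit, assuming the struct is reachable as std.heap.DirectAllocator). Every alloc and free maps to a syscall; on the Linux path, an alignment larger than the page size is handled by reserving n + alignment bytes, rounding the base address up to the next alignment boundary, and munmapping the unused leading pages:

    const std = @import("std");

    test "DirectAllocator sketch" {
        var direct_allocator = std.heap.DirectAllocator.init();
        defer direct_allocator.deinit();
        const allocator = &direct_allocator.allocator;

        // One mmap (or HeapAlloc) per allocation, one munmap (or HeapFree) per free.
        var page = try allocator.alloc(u8, 4096);
        defer allocator.free(page);
    }
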
@@ -176,15 +184,15 @@ pub const DirectAllocator = struct {
pub const ArenaAllocator = struct {
pub allocator: Allocator,
- child_allocator: &Allocator,
+ child_allocator: *Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
- pub fn init(child_allocator: &Allocator) ArenaAllocator {
- return ArenaAllocator {
- .allocator = Allocator {
+ pub fn init(child_allocator: *Allocator) ArenaAllocator {
+ return ArenaAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -195,7 +203,7 @@ pub const ArenaAllocator = struct {
};
}
- pub fn deinit(self: &ArenaAllocator) void {
+ pub fn deinit(self: *ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
@@ -205,7 +213,7 @@ pub const ArenaAllocator = struct {
}
}
- fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
+ fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
@@ -214,9 +222,9 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
- const buf_node_slice = ([]BufNode)(buf[0..@sizeOf(BufNode)]);
+ const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
- *buf_node = BufNode {
+ buf_node.* = BufNode{
.data = buf,
.prev = null,
.next = null,
@@ -226,7 +234,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
@@ -241,13 +249,13 @@ pub const ArenaAllocator = struct {
cur_node = try self.createNode(cur_buf.len, n + alignment);
continue;
}
- const result = cur_buf[adjusted_index .. new_end_index];
+ const result = cur_buf[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -257,7 +265,7 @@ pub const ArenaAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void { }
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
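
For reference, a minimal ArenaAllocator sketch (not part of this commit, names assumed reachable through std.heap). Individual frees are no-ops; everything is released at once by deinit, which walks the buffer list built by createNode:

    const std = @import("std");

    test "ArenaAllocator sketch" {
        var direct_allocator = std.heap.DirectAllocator.init();
        defer direct_allocator.deinit();

        var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
        defer arena.deinit(); // frees every buffer the arena grabbed from its child allocator

        const allocator = &arena.allocator;
        var a = try allocator.alloc(u8, 100);
        var b = try allocator.alloc(u8, 200);
        // no per-allocation free needed; free() above does nothing
    }
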
pub const FixedBufferAllocator = struct {
@@ -266,8 +274,8 @@ pub const FixedBufferAllocator = struct {
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
- return FixedBufferAllocator {
- .allocator = Allocator {
+ return FixedBufferAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -277,9 +285,9 @@ pub const FixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
- const addr = @ptrToInt(&self.buffer[self.end_index]);
+ const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const rem = @rem(addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_index = self.end_index + march_forward_bytes;
@@ -287,13 +295,69 @@ pub const FixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- const result = self.buffer[adjusted_index .. new_end_index];
+ const result = self.buffer[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+ assert(old_mem.len <= self.end_index);
+ if (new_size <= old_mem.len) {
+ return old_mem[0..new_size];
+ } else if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len) {
+ const start_index = self.end_index - old_mem.len;
+ const new_end_index = start_index + new_size;
+ if (new_end_index > self.buffer.len) return error.OutOfMemory;
+ const result = self.buffer[start_index..new_end_index];
+ self.end_index = new_end_index;
+ return result;
+ } else {
+ const result = try alloc(allocator, new_size, alignment);
+ mem.copy(u8, result, old_mem);
+ return result;
+ }
+ }
+
+ fn free(allocator: *Allocator, bytes: []u8) void {}
+};
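
For reference, a minimal FixedBufferAllocator sketch (not part of this commit). It bumps end_index within a caller-supplied buffer, and the realloc added above grows the most recent allocation in place when it can, as the "Reuse memory on realloc" test further down verifies:

    const std = @import("std");
    const assert = std.debug.assert;

    test "FixedBufferAllocator sketch" {
        var buf: [1024]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buf[0..]);
        const allocator = &fba.allocator;

        var slice = try allocator.alloc(u8, 16);
        // Most recent allocation: realloc extends it in place.
        slice = try allocator.realloc(u8, slice, 32);
        assert(slice.len == 32);
    }
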
+
+/// lock free
+pub const ThreadSafeFixedBufferAllocator = struct {
+ allocator: Allocator,
+ end_index: usize,
+ buffer: []u8,
+
+ pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
+ return ThreadSafeFixedBufferAllocator{
+ .allocator = Allocator{
+ .allocFn = alloc,
+ .reallocFn = realloc,
+ .freeFn = free,
+ },
+ .buffer = buffer,
+ .end_index = 0,
+ };
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+ var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
+ while (true) {
+ const addr = @ptrToInt(self.buffer.ptr) + end_index;
+ const rem = @rem(addr, alignment);
+ const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
+ const adjusted_index = end_index + march_forward_bytes;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) {
+ return error.OutOfMemory;
+ }
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
+ }
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -303,10 +367,75 @@ pub const FixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void { }
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
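
For reference, a minimal ThreadSafeFixedBufferAllocator sketch (not part of this commit). Allocation is a lock-free bump: the cmpxchgWeak loop above re-reads end_index on failure and retries, so concurrent callers each receive disjoint slices of the shared buffer:

    const std = @import("std");

    var shared_buffer: [4096]u8 = undefined;

    test "ThreadSafeFixedBufferAllocator sketch" {
        var tsfba = std.heap.ThreadSafeFixedBufferAllocator.init(shared_buffer[0..]);
        const allocator = &tsfba.allocator;

        // Safe to call from several threads; this sketch shows a single-threaded call.
        var slice = try allocator.alloc(u8, 32);
    }
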
+pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+ return StackFallbackAllocator(size){
+ .buffer = undefined,
+ .fallback_allocator = fallback_allocator,
+ .fixed_buffer_allocator = undefined,
+ .allocator = Allocator{
+ .allocFn = StackFallbackAllocator(size).alloc,
+ .reallocFn = StackFallbackAllocator(size).realloc,
+ .freeFn = StackFallbackAllocator(size).free,
+ },
+ };
+}
+
+pub fn StackFallbackAllocator(comptime size: usize) type {
+ return struct {
+ const Self = this;
+
+ buffer: [size]u8,
+ allocator: Allocator,
+ fallback_allocator: *Allocator,
+ fixed_buffer_allocator: FixedBufferAllocator,
+
+ pub fn get(self: *Self) *Allocator {
+ self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
+ return &self.allocator;
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, n, alignment) catch
+ self.fallback_allocator.allocFn(self.fallback_allocator, n, alignment);
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+ if (in_buffer) {
+ return FixedBufferAllocator.realloc(
+ &self.fixed_buffer_allocator.allocator,
+ old_mem,
+ new_size,
+ alignment,
+ ) catch {
+ const result = try self.fallback_allocator.allocFn(
+ self.fallback_allocator,
+ new_size,
+ alignment,
+ );
+ mem.copy(u8, result, old_mem);
+ return result;
+ };
+ }
+ return self.fallback_allocator.reallocFn(self.fallback_allocator, old_mem, new_size, alignment);
+ }
+ fn free(allocator: *Allocator, bytes: []u8) void {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ const in_buffer = @ptrToInt(bytes.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(bytes.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+ if (!in_buffer) {
+ return self.fallback_allocator.freeFn(self.fallback_allocator, bytes);
+ }
+ }
+ };
+}
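
For reference, a minimal stackFallback sketch (not part of this commit). Requests are tried against the fixed on-stack buffer first; anything that does not fit, or that is reallocated past it, is forwarded to the fallback allocator, and free only forwards pointers that lie outside the buffer:

    const std = @import("std");

    test "stackFallback sketch" {
        var direct_allocator = std.heap.DirectAllocator.init();
        defer direct_allocator.deinit();

        var stack_alloc = std.heap.stackFallback(256, &direct_allocator.allocator);
        const allocator = stack_alloc.get();

        var small = try allocator.alloc(u8, 64);   // served by the 256-byte stack buffer
        var large = try allocator.alloc(u8, 1024); // too big, forwarded to DirectAllocator
        allocator.free(large);
        allocator.free(small); // no-op: the pointer is inside the stack buffer
    }
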
test "c_allocator" {
if (builtin.link_libc) {
@@ -322,6 +451,8 @@ test "DirectAllocator" {
const allocator = &direct_allocator.allocator;
try testAllocator(allocator);
+ try testAllocatorAligned(allocator, 16);
+ try testAllocatorLargeAlignment(allocator);
}
test "ArenaAllocator" {
@@ -332,6 +463,8 @@ test "ArenaAllocator" {
defer arena_allocator.deinit();
try testAllocator(&arena_allocator.allocator);
+ try testAllocatorAligned(&arena_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&arena_allocator.allocator);
}
var test_fixed_buffer_allocator_memory: [30000 * @sizeOf(usize)]u8 = undefined;
@@ -339,24 +472,123 @@ test "FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(&fixed_buffer_allocator.allocator);
+ try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
-fn testAllocator(allocator: &mem.Allocator) !void {
- var slice = try allocator.alloc(&i32, 100);
+test "FixedBufferAllocator Reuse memory on realloc" {
+ var small_fixed_buffer: [10]u8 = undefined;
+ // check if we re-use the memory
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+
+ var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+ assert(slice0.len == 5);
+ var slice1 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 10);
+ assert(slice1.ptr == slice0.ptr);
+ assert(slice1.len == 10);
+ debug.assertError(fixed_buffer_allocator.allocator.realloc(u8, slice1, 11), error.OutOfMemory);
+ }
+ // check that we don't re-use the memory if it's not the most recent block
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+
+ var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ slice0[0] = 1;
+ slice0[1] = 2;
+ var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ var slice2 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 4);
+ assert(slice0.ptr != slice2.ptr);
+ assert(slice1.ptr != slice2.ptr);
+ assert(slice2[0] == 1);
+ assert(slice2[1] == 2);
+ }
+}
+
+test "ThreadSafeFixedBufferAllocator" {
+ var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+ try testAllocator(&fixed_buffer_allocator.allocator);
+ try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
+}
+fn testAllocator(allocator: *mem.Allocator) !void {
+ var slice = try allocator.alloc(*i32, 100);
+ assert(slice.len == 100);
for (slice) |*item, i| {
- *item = try allocator.create(i32);
- **item = i32(i);
+ item.* = try allocator.create(@intCast(i32, i));
}
- for (slice) |item, i| {
+ slice = try allocator.realloc(*i32, slice, 20000);
+ assert(slice.len == 20000);
+
+ for (slice[0..100]) |item, i| {
+ assert(item.* == @intCast(i32, i));
allocator.destroy(item);
}
- slice = try allocator.realloc(&i32, slice, 20000);
- slice = try allocator.realloc(&i32, slice, 50);
- slice = try allocator.realloc(&i32, slice, 25);
- slice = try allocator.realloc(&i32, slice, 10);
+ slice = try allocator.realloc(*i32, slice, 50);
+ assert(slice.len == 50);
+ slice = try allocator.realloc(*i32, slice, 25);
+ assert(slice.len == 25);
+ slice = try allocator.realloc(*i32, slice, 0);
+ assert(slice.len == 0);
+ slice = try allocator.realloc(*i32, slice, 10);
+ assert(slice.len == 10);
+
+ allocator.free(slice);
+}
+
+fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
+ // initial
+ var slice = try allocator.alignedAlloc(u8, alignment, 10);
+ assert(slice.len == 10);
+ // grow
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
+ assert(slice.len == 100);
+ // shrink
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 10);
+ assert(slice.len == 10);
+ // go to zero
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 0);
+ assert(slice.len == 0);
+ // realloc from zero
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
+ assert(slice.len == 100);
+ // shrink with shrink
+ slice = allocator.alignedShrink(u8, alignment, slice, 10);
+ assert(slice.len == 10);
+ // shrink to zero
+ slice = allocator.alignedShrink(u8, alignment, slice, 0);
+ assert(slice.len == 0);
+}
+
+fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
+ //Maybe a platform's page_size is actually the same as or
+ // very near usize?
+ if (os.page_size << 2 > @maxValue(usize)) return;
+
+ const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
+ const large_align = u29(os.page_size << 2);
+
+ var align_mask: usize = undefined;
+ _ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(large_align)), &align_mask);
+
+ var slice = try allocator.allocFn(allocator, 500, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 100, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 5000, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 10, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 20000, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
allocator.free(slice);
}