author    Andrew Kelley <andrew@ziglang.org>  2019-11-25 17:53:26 -0500
committer Andrew Kelley <andrew@ziglang.org>  2019-11-25 17:53:26 -0500
commit    659c1bdeeebce7bf32e122be6a728fe727112c56 (patch)
tree      774feeeee06ea881be48caf769cd0e96944d5345 /lib/std/heap.zig
parent    cb38bd0a1436bd18de8ed57c45ffc890c8ddfb78 (diff)
parent    0f0d01a0374dd62d55a0fe3eed72b9ec009af410 (diff)
Merge branch 'wasi-run-tests' of https://github.com/fengb/zig into fengb-wasi-run-tests
Diffstat (limited to 'lib/std/heap.zig')
-rw-r--r--  lib/std/heap.zig | 189
1 file changed, 91 insertions(+), 98 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index dae836611e..259c29f6e6 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -33,11 +33,19 @@ fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
-pub const page_allocator = &page_allocator_state;
+pub const page_allocator = if (std.Target.current.isWasm())
+ &wasm_page_allocator_state
+else
+ &page_allocator_state;
+
var page_allocator_state = Allocator{
.reallocFn = PageAllocator.realloc,
.shrinkFn = PageAllocator.shrink,
};
+var wasm_page_allocator_state = Allocator{
+ .reallocFn = WasmPageAllocator.realloc,
+ .shrinkFn = WasmPageAllocator.shrink,
+};
/// Deprecated. Use `page_allocator`.
pub const direct_allocator = page_allocator;
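
With this change callers keep writing `std.heap.page_allocator` and the
comptime `if` picks the backing state per target. A minimal usage sketch
against the 0.5-era `*Allocator` interface (the test name and sizes are
illustrative, not part of this commit):

    const std = @import("std");

    test "page_allocator basic usage" {
        // Backed by WasmPageAllocator on wasm targets, PageAllocator elsewhere.
        const allocator = std.heap.page_allocator;

        const buf = try allocator.alloc(u8, 100);
        defer allocator.free(buf);

        std.mem.set(u8, buf, 0xaa);
        std.testing.expect(buf.len == 100);
    }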
@@ -238,6 +246,88 @@ const PageAllocator = struct {
}
};
+// TODO Exposing LLVM intrinsics this way is a bug
+// See: https://github.com/ziglang/zig/issues/2291
+extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
+extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
+
+/// TODO: make this re-use freed pages, and cooperate with other callers of these global intrinsics
+/// by better utilizing the return value of grow()
+const WasmPageAllocator = struct {
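+    // Container-level variables: a single global allocator state (wasm has
+    // one linear memory), so the functions below need no @fieldParentPtr.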
+ var start_ptr: [*]u8 = undefined;
+ var num_pages: usize = 0;
+ var end_index: usize = 0;
+
+ comptime {
+ if (builtin.arch != .wasm32) {
+ @compileError("WasmPageAllocator is only available for wasm32 arch");
+ }
+ }
+
+ fn alloc(allocator: *Allocator, size: usize, alignment: u29) ![]u8 {
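+        // Bump allocation: carve the block from the end of the reserved
+        // region, aligning the start address upward as required.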
+ const addr = @ptrToInt(start_ptr) + end_index;
+ const adjusted_addr = mem.alignForward(addr, alignment);
+ const adjusted_index = end_index + (adjusted_addr - addr);
+ const new_end_index = adjusted_index + size;
+
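+        // Not enough reserved capacity: grow linear memory via memory.grow.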
+ if (new_end_index > num_pages * mem.page_size) {
+ const required_memory = new_end_index - (num_pages * mem.page_size);
+
+            // Round the byte deficit up to a whole number of wasm pages.
+            var grow_pages: usize = required_memory / mem.page_size;
+            if (required_memory % mem.page_size != 0) {
+                grow_pages += 1;
+            }
+
+            const prev_page = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, grow_pages));
+            if (prev_page == -1) {
+                return error.OutOfMemory;
+            }
+
+            num_pages += grow_pages;
+ }
+
+ const result = start_ptr[adjusted_index..new_end_index];
+ end_index = new_end_index;
+
+ return result;
+ }
+
+ // Check if memory is the last "item" and is aligned correctly
+ fn is_last_item(memory: []u8, alignment: u29) bool {
+ return memory.ptr == start_ptr + end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ // Initialize start_ptr at the first realloc
+ if (num_pages == 0) {
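+            // memory.size returns the current size of linear memory in
+            // 64 KiB pages, so start_ptr begins at the current heap end.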
+ start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
+ }
+
+ if (is_last_item(old_mem, new_align)) {
+ const start_index = end_index - old_mem.len;
+ const new_end_index = start_index + new_size;
+
+ if (new_end_index > num_pages * mem.page_size) {
+ _ = try alloc(allocator, new_end_index - end_index, new_align);
+ }
+ const result = start_ptr[start_index..new_end_index];
+
+ end_index = new_end_index;
+ return result;
+ } else if (new_size <= old_mem.len and new_align <= old_align) {
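+            // Same convention as FixedBufferAllocator later in this file:
+            // we can't do anything useful with the memory, so tell the
+            // client to keep it.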
+ return error.OutOfMemory;
+ } else {
+ const result = try alloc(allocator, new_size, new_align);
+ mem.copy(u8, result, old_mem);
+ return result;
+ }
+ }
+
+ fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
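+        // Nothing is reclaimed: this bump allocator never decreases
+        // end_index or returns pages to the wasm runtime.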
+ return old_mem[0..new_size];
+ }
+};
+
pub const HeapAllocator = switch (builtin.os) {
.windows => struct {
allocator: Allocator,
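
For intuition about the grow path above: a byte deficit is rounded up to
whole 64 KiB wasm pages before calling memory.grow. A self-contained check
of that arithmetic (the page size is hardcoded to 65536 here because
mem.page_size depends on the build target, and the request size is made up):

    const std = @import("std");

    test "deficit rounds up to whole wasm pages" {
        const wasm_page_size: usize = 65536;
        const required_memory: usize = 100 * 1024; // hypothetical 100 KiB deficit
        var grow_pages: usize = required_memory / wasm_page_size;
        if (required_memory % wasm_page_size != 0) grow_pages += 1;
        std.testing.expect(grow_pages == 2); // two 64 KiB pages cover 100 KiB
    }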
@@ -487,103 +577,6 @@ pub const FixedBufferAllocator = struct {
}
};
-// TODO Exposed LLVM intrinsics is a bug
-// See: https://github.com/ziglang/zig/issues/2291
-extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
-extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
-
-pub const wasm_allocator = &wasm_allocator_state.allocator;
-var wasm_allocator_state = WasmAllocator{
- .allocator = Allocator{
- .reallocFn = WasmAllocator.realloc,
- .shrinkFn = WasmAllocator.shrink,
- },
- .start_ptr = undefined,
- .num_pages = 0,
- .end_index = 0,
-};
-
-const WasmAllocator = struct {
- allocator: Allocator,
- start_ptr: [*]u8,
- num_pages: usize,
- end_index: usize,
-
- comptime {
- if (builtin.arch != .wasm32) {
- @compileError("WasmAllocator is only available for wasm32 arch");
- }
- }
-
- fn alloc(allocator: *Allocator, size: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
-
- const addr = @ptrToInt(self.start_ptr) + self.end_index;
- const adjusted_addr = mem.alignForward(addr, alignment);
- const adjusted_index = self.end_index + (adjusted_addr - addr);
- const new_end_index = adjusted_index + size;
-
- if (new_end_index > self.num_pages * mem.page_size) {
- const required_memory = new_end_index - (self.num_pages * mem.page_size);
-
- var num_pages: usize = required_memory / mem.page_size;
- if (required_memory % mem.page_size != 0) {
- num_pages += 1;
- }
-
- const prev_page = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, num_pages));
- if (prev_page == -1) {
- return error.OutOfMemory;
- }
-
- self.num_pages += num_pages;
- }
-
- const result = self.start_ptr[adjusted_index..new_end_index];
- self.end_index = new_end_index;
-
- return result;
- }
-
- // Check if memory is the last "item" and is aligned correctly
- fn is_last_item(allocator: *Allocator, memory: []u8, alignment: u29) bool {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
- return memory.ptr == self.start_ptr + self.end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
- }
-
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
-
- // Initialize start_ptr at the first realloc
- if (self.num_pages == 0) {
- self.start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
- }
-
- if (is_last_item(allocator, old_mem, new_align)) {
- const start_index = self.end_index - old_mem.len;
- const new_end_index = start_index + new_size;
-
- if (new_end_index > self.num_pages * mem.page_size) {
- _ = try alloc(allocator, new_end_index - self.end_index, new_align);
- }
- const result = self.start_ptr[start_index..new_end_index];
-
- self.end_index = new_end_index;
- return result;
- } else if (new_size <= old_mem.len and new_align <= old_align) {
- return error.OutOfMemory;
- } else {
- const result = try alloc(allocator, new_size, new_align);
- mem.copy(u8, result, old_mem);
- return result;
- }
- }
-
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return old_mem[0..new_size];
- }
-};
-
pub const ThreadSafeFixedBufferAllocator = blk: {
if (builtin.single_threaded) {
break :blk FixedBufferAllocator;