path: root/lib/std/heap.zig
author     Andrew Kelley <andrew@ziglang.org>    2020-05-10 02:05:54 -0400
committer  Andrew Kelley <andrew@ziglang.org>    2020-05-10 02:05:54 -0400
commit     a32d3a85d21d614e5960b9eadcd85374954b910f (patch)
tree       8712bfc619205eaf23201215a4a19f7c0c108cfa /lib/std/heap.zig
parent     ae080b5c217fbcfd350a5d52b8b4626a95540ab3 (diff)
rework self-hosted compiler for incremental builds
* introduce std.ArrayListUnmanaged for when you have the allocator stored elsewhere
* move std.heap.ArenaAllocator implementation to its own file. extract the main state into std.heap.ArenaAllocator.State, which can be stored as an alternative to storing the entire ArenaAllocator, saving 24 bytes per ArenaAllocator on 64-bit targets (a rough sketch of this pattern follows the list)
* std.LinkedList.Node pointer field now defaults to being null initialized
* Rework self-hosted compiler Package API
* Delete almost all the bitrotted self-hosted compiler code. The only bitrotted code left is in main.zig and compilation.zig
* Add call instruction to ZIR
* self-hosted compiler ir API and link API are reworked to support a long-running compiler that incrementally updates declarations
* Introduce the concept of scopes to ZIR semantic analysis
* ZIR text format supports referencing named decls that are declared later in the file
* Figure out how memory management works for the long-running compiler and incremental compilation. The main roots are top level declarations. There is a table of decls. The key is a cryptographic hash of the fully qualified decl name. Each decl has an arena allocator where all of the memory related to that decl is stored. Each code block has its own arena allocator for the lifetime of the block. Values that want to survive when going out of scope in a block must get copied into the outer block. Finally, values must get copied into the Decl arena to be long-lived.
* Delete the unused MemoryCell struct. Instead, comptime pointers are based on references to Decl structs.
* Figure out how caching works. Each Decl will store a set of other Decls which must be recompiled when it changes.

This branch is still work-in-progress; this commit breaks the build.
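To make the State extraction concrete, here is a hypothetical sketch of the scheme the message describes: a table of decls keyed by a cryptographic hash of the fully qualified name, with each decl storing only the arena State and promoting it to a full ArenaAllocator on demand. Decl and hashFqn are invented names for illustration, and the calls follow later std versions (Allocator passed by value, State.promote()); the exact signatures at this commit may differ.

    const std = @import("std");

    // One arena per decl: everything the decl owns lives in its arena
    // and dies with it. Storing only the State keeps the per-decl
    // footprint small; promote() rebuilds a usable ArenaAllocator.
    const Decl = struct {
        name: []const u8,
        arena_state: std.heap.ArenaAllocator.State = .{},

        fn deinit(decl: *Decl, gpa: std.mem.Allocator) void {
            decl.arena_state.promote(gpa).deinit();
        }
    };

    // Key the decl table by a cryptographic hash of the fully
    // qualified decl name, as the commit message describes.
    fn hashFqn(fqn: []const u8) [32]u8 {
        var out: [32]u8 = undefined;
        std.crypto.hash.Blake3.hash(fqn, &out, .{});
        return out;
    }

    test "decl table keyed by hashed fully qualified name" {
        const gpa = std.testing.allocator;
        var table = std.AutoHashMap([32]u8, Decl).init(gpa);
        defer {
            var it = table.valueIterator();
            while (it.next()) |d| d.deinit(gpa);
            table.deinit();
        }

        // Copy the name into the decl's own arena so the decl owns it,
        // then write the (possibly updated) state back.
        var decl = Decl{ .name = undefined };
        var arena = decl.arena_state.promote(gpa);
        decl.name = try arena.allocator().dupe(u8, "std.heap.ArenaAllocator");
        decl.arena_state = arena.state;
        try table.put(hashFqn("std.heap.ArenaAllocator"), decl);

        try std.testing.expect(table.contains(hashFqn("std.heap.ArenaAllocator")));
    }

std.ArrayListUnmanaged follows the same storage idea: the list holds no allocator, and each allocating method takes one explicitly (e.g. list.append(gpa, item) in later std versions).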
Diffstat (limited to 'lib/std/heap.zig')
-rw-r--r--  lib/std/heap.zig  90
1 file changed, 1 insertion(+), 89 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 3e00ca5d59..6bbb688ef0 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -11,6 +11,7 @@ const maxInt = std.math.maxInt;
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
+pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
const Allocator = mem.Allocator;
@@ -510,95 +511,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
else => @compileError("Unsupported OS"),
};
-/// This allocator takes an existing allocator, wraps it, and provides an interface
-/// where you can allocate without freeing, and then free it all together.
-pub const ArenaAllocator = struct {
- allocator: Allocator,
-
- child_allocator: *Allocator,
- buffer_list: std.SinglyLinkedList([]u8),
- end_index: usize,
-
- const BufNode = std.SinglyLinkedList([]u8).Node;
-
- pub fn init(child_allocator: *Allocator) ArenaAllocator {
- return ArenaAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
- .child_allocator = child_allocator,
- .buffer_list = std.SinglyLinkedList([]u8).init(),
- .end_index = 0,
- };
- }
-
- pub fn deinit(self: ArenaAllocator) void {
- var it = self.buffer_list.first;
- while (it) |node| {
- // this has to occur before the free because the free frees node
- const next_it = node.next;
- self.child_allocator.free(node.data);
- it = next_it;
- }
- }
-
- fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
- const actual_min_size = minimum_size + @sizeOf(BufNode);
- var len = prev_len;
- while (true) {
- len += len / 2;
- len += mem.page_size - @rem(len, mem.page_size);
- if (len >= actual_min_size) break;
- }
- const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
- const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
- const buf_node = &buf_node_slice[0];
- buf_node.* = BufNode{
- .data = buf,
- .next = null,
- };
- self.buffer_list.prepend(buf_node);
- self.end_index = 0;
- return buf_node;
- }
-
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
-
- var cur_node = if (self.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
- while (true) {
- const cur_buf = cur_node.data[@sizeOf(BufNode)..];
- const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
- const adjusted_addr = mem.alignForward(addr, alignment);
- const adjusted_index = self.end_index + (adjusted_addr - addr);
- const new_end_index = adjusted_index + n;
- if (new_end_index > cur_buf.len) {
- cur_node = try self.createNode(cur_buf.len, n + alignment);
- continue;
- }
- const result = cur_buf[adjusted_index..new_end_index];
- self.end_index = new_end_index;
- return result;
- }
- }
-
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- if (new_size <= old_mem.len and new_align <= new_size) {
- // We can't do anything with the memory, so tell the client to keep it.
- return error.OutOfMemory;
- } else {
- const result = try alloc(allocator, new_size, new_align);
- @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
- return result;
- }
- }
-
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return old_mem[0..new_size];
- }
-};
-
pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
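For reference, the ArenaAllocator that this diff relocates is used by allocating freely out of the arena, never freeing individual allocations, and releasing everything at once with deinit(). A minimal usage sketch, written against later std versions where the Allocator interface is a value obtained from allocator() (at this commit it was an embedded Allocator field taken by pointer):

    const std = @import("std");

    test "basic arena usage" {
        // The arena bump-allocates out of buffers obtained from the
        // child allocator, growing them roughly 1.5x and rounding up
        // past a page boundary as needed, per createNode above.
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit(); // frees every buffer at once
        const a = arena.allocator();

        const nums = try a.alloc(u32, 1000);
        for (nums, 0..) |*n, i| n.* = @as(u32, @intCast(i));
        const msg = try std.fmt.allocPrint(a, "last = {d}", .{nums[999]});
        try std.testing.expectEqualStrings("last = 999", msg);
        // No per-allocation frees are needed.
    }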